gabion 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- gabion/__init__.py +5 -0
- gabion/__main__.py +11 -0
- gabion/analysis/__init__.py +37 -0
- gabion/analysis/dataflow_audit.py +3173 -0
- gabion/analysis/engine.py +8 -0
- gabion/analysis/model.py +45 -0
- gabion/analysis/visitors.py +402 -0
- gabion/cli.py +503 -0
- gabion/config.py +45 -0
- gabion/lsp_client.py +111 -0
- gabion/refactor/__init__.py +4 -0
- gabion/refactor/engine.py +726 -0
- gabion/refactor/model.py +37 -0
- gabion/schema.py +84 -0
- gabion/server.py +447 -0
- gabion/synthesis/__init__.py +26 -0
- gabion/synthesis/merge.py +41 -0
- gabion/synthesis/model.py +41 -0
- gabion/synthesis/naming.py +45 -0
- gabion/synthesis/protocols.py +74 -0
- gabion/synthesis/schedule.py +87 -0
- gabion-0.1.0.dist-info/METADATA +250 -0
- gabion-0.1.0.dist-info/RECORD +26 -0
- gabion-0.1.0.dist-info/WHEEL +4 -0
- gabion-0.1.0.dist-info/entry_points.txt +3 -0
- gabion-0.1.0.dist-info/licenses/LICENSE +190 -0
gabion/refactor/model.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from typing import List, Optional, Tuple
|
|
5
|
+
|
|
6
|
+
# (line, column) pair used as a text-edit coordinate.
Position = Tuple[int, int]


@dataclass(frozen=True)
class TextEdit:
    """A single in-place replacement of the span [start, end) in *path*."""

    path: str
    start: Position
    end: Position
    replacement: str


@dataclass(frozen=True)
class FieldSpec:
    """A protocol field; *type_hint* is None when the type is unknown."""

    name: str
    type_hint: Optional[str] = None


@dataclass(frozen=True)
class RefactorRequest:
    """Input parameters for planning a protocol-extraction refactor."""

    protocol_name: str
    bundle: List[str]
    target_path: str
    fields: List[FieldSpec] = field(default_factory=list)
    target_functions: List[str] = field(default_factory=list)
    rationale: Optional[str] = None


@dataclass
class RefactorPlan:
    """Result of planning a refactor: text edits plus warnings/errors."""

    edits: List[TextEdit] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)
|
gabion/schema.py
ADDED
|
@@ -0,0 +1,84 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Dict, List, Optional, Tuple
|
|
4
|
+
|
|
5
|
+
from pydantic import BaseModel
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
class BundleDTO(BaseModel):
    """A detected parameter bundle as reported by analysis."""

    fields: List[str]
    locations: List[str]
    suggested_name: str
    tier: int


class AnalysisRequest(BaseModel):
    """Request payload for the analysis command."""

    root_path: str


class AnalysisResponse(BaseModel):
    """Analysis result: detected bundles plus summary counters."""

    bundles: List[BundleDTO]
    stats: Dict[str, int]


class SynthesisBundleDTO(BaseModel):
    """One input bundle (parameter names) together with its tier."""

    bundle: List[str]
    tier: int


class SynthesisRequest(BaseModel):
    """Request payload for the synthesis-plan command.

    NOTE: the mutable class-level defaults ([] / {}) are safe on pydantic
    models — pydantic copies defaults per instance, unlike plain Python
    function defaults.
    """

    bundles: List[SynthesisBundleDTO]
    field_types: Dict[str, str] = {}
    existing_names: List[str] = []
    frequency: Dict[str, int] = {}
    fallback_prefix: str = "Bundle"
    max_tier: int = 2
    min_bundle_size: int = 2
    allow_singletons: bool = False


class SynthesisFieldDTO(BaseModel):
    """A synthesized field and the source parameters it replaces."""

    name: str
    type_hint: Optional[str] = None
    source_params: List[str] = []


class SynthesisProtocolDTO(BaseModel):
    """A synthesized protocol covering one bundle."""

    name: str
    fields: List[SynthesisFieldDTO]
    bundle: List[str]
    tier: int
    rationale: Optional[str] = None


class SynthesisResponse(BaseModel):
    """Synthesis result: planned protocols plus warnings/errors."""

    protocols: List[SynthesisProtocolDTO]
    warnings: List[str] = []
    errors: List[str] = []


class RefactorFieldDTO(BaseModel):
    """A field of the protocol to be extracted."""

    name: str
    type_hint: Optional[str] = None


class RefactorRequest(BaseModel):
    """Request payload for the protocol-extraction refactor command."""

    protocol_name: str
    bundle: List[str]
    fields: List[RefactorFieldDTO] = []
    target_path: str
    target_functions: List[str] = []
    rationale: Optional[str] = None


class TextEditDTO(BaseModel):
    """One text replacement: (line, column) start/end span within *path*."""

    path: str
    start: Tuple[int, int]
    end: Tuple[int, int]
    replacement: str


class RefactorResponse(BaseModel):
    """Refactor result: edits to apply plus warnings/errors."""

    edits: List[TextEditDTO] = []
    warnings: List[str] = []
    errors: List[str] = []
|
gabion/server.py
ADDED
|
@@ -0,0 +1,447 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import json
|
|
4
|
+
from pathlib import Path
|
|
5
|
+
from urllib.parse import unquote, urlparse
|
|
6
|
+
|
|
7
|
+
from pygls.lsp.server import LanguageServer
|
|
8
|
+
from lsprotocol.types import (
|
|
9
|
+
TEXT_DOCUMENT_DID_OPEN,
|
|
10
|
+
TEXT_DOCUMENT_DID_SAVE,
|
|
11
|
+
TEXT_DOCUMENT_CODE_ACTION,
|
|
12
|
+
CodeAction,
|
|
13
|
+
CodeActionKind,
|
|
14
|
+
CodeActionParams,
|
|
15
|
+
Command,
|
|
16
|
+
Diagnostic,
|
|
17
|
+
DiagnosticSeverity,
|
|
18
|
+
Position,
|
|
19
|
+
Range,
|
|
20
|
+
WorkspaceEdit,
|
|
21
|
+
)
|
|
22
|
+
|
|
23
|
+
from gabion.analysis import (
|
|
24
|
+
AuditConfig,
|
|
25
|
+
analyze_paths,
|
|
26
|
+
apply_baseline,
|
|
27
|
+
compute_violations,
|
|
28
|
+
build_refactor_plan,
|
|
29
|
+
build_synthesis_plan,
|
|
30
|
+
load_baseline,
|
|
31
|
+
render_dot,
|
|
32
|
+
render_protocol_stubs,
|
|
33
|
+
render_refactor_plan,
|
|
34
|
+
render_report,
|
|
35
|
+
render_synthesis_section,
|
|
36
|
+
resolve_baseline_path,
|
|
37
|
+
write_baseline,
|
|
38
|
+
)
|
|
39
|
+
from gabion.config import dataflow_defaults, merge_payload
|
|
40
|
+
from gabion.refactor import (
|
|
41
|
+
FieldSpec,
|
|
42
|
+
RefactorEngine,
|
|
43
|
+
RefactorRequest as RefactorRequestModel,
|
|
44
|
+
)
|
|
45
|
+
from gabion.schema import (
|
|
46
|
+
RefactorRequest,
|
|
47
|
+
RefactorResponse,
|
|
48
|
+
SynthesisResponse,
|
|
49
|
+
SynthesisRequest,
|
|
50
|
+
TextEditDTO,
|
|
51
|
+
)
|
|
52
|
+
from gabion.synthesis import NamingContext, SynthesisConfig, Synthesizer
|
|
53
|
+
|
|
54
|
+
# Single pygls server instance; command/feature handlers are registered on it below.
server = LanguageServer("gabion", "0.1.0")
# Workspace command identifiers exposed to LSP clients.
DATAFLOW_COMMAND = "gabion.dataflowAudit"
SYNTHESIS_COMMAND = "gabion.synthesisPlan"
REFACTOR_COMMAND = "gabion.refactorProtocol"
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def _uri_to_path(uri: str) -> Path:
|
|
61
|
+
parsed = urlparse(uri)
|
|
62
|
+
if parsed.scheme == "file":
|
|
63
|
+
return Path(unquote(parsed.path))
|
|
64
|
+
return Path(uri)
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _normalize_transparent_decorators(value: object) -> set[str] | None:
|
|
68
|
+
if value is None:
|
|
69
|
+
return None
|
|
70
|
+
items: list[str] = []
|
|
71
|
+
if isinstance(value, str):
|
|
72
|
+
items = [part.strip() for part in value.split(",") if part.strip()]
|
|
73
|
+
elif isinstance(value, (list, tuple, set)):
|
|
74
|
+
for item in value:
|
|
75
|
+
if isinstance(item, str):
|
|
76
|
+
items.extend([part.strip() for part in item.split(",") if part.strip()])
|
|
77
|
+
if not items:
|
|
78
|
+
return None
|
|
79
|
+
return set(items)
|
|
80
|
+
|
|
81
|
+
|
|
82
|
+
def _diagnostics_for_path(path_str: str, project_root: Path | None) -> list[Diagnostic]:
    """Audit one file for implicit parameter bundles and emit LSP diagnostics.

    One Information-severity diagnostic is produced per (bundle, parameter)
    pair; parameters without a recorded span fall back to the file start.
    """
    analysis = analyze_paths(
        [Path(path_str)],
        recursive=True,
        type_audit=False,
        type_audit_report=False,
        type_audit_max=0,
        include_constant_smells=False,
        include_unused_arg_smells=False,
        config=AuditConfig(project_root=project_root),
    )
    out: list[Diagnostic] = []
    for file_path, fn_bundles in analysis.groups_by_path.items():
        spans_for_file = analysis.param_spans_by_path.get(file_path, {})
        for fn_name, bundle_list in fn_bundles.items():
            spans = spans_for_file.get(fn_name, {})
            for bundle in bundle_list:
                ordered = sorted(bundle)
                message = f"Implicit bundle detected: {', '.join(ordered)}"
                for param in ordered:
                    span = spans.get(param)
                    if span is None:
                        # No recorded location: anchor at the first character.
                        rng = Range(
                            start=Position(line=0, character=0),
                            end=Position(line=0, character=1),
                        )
                    else:
                        s_line, s_col, e_line, e_col = span
                        rng = Range(
                            start=Position(line=s_line, character=s_col),
                            end=Position(line=e_line, character=e_col),
                        )
                    out.append(
                        Diagnostic(
                            range=rng,
                            message=message,
                            severity=DiagnosticSeverity.Information,
                            source="gabion",
                        )
                    )
    return out
|
|
118
|
+
|
|
119
|
+
|
|
120
|
+
@server.command(DATAFLOW_COMMAND)
def execute_command(ls: LanguageServer, payload: dict | None = None) -> dict:
    """Run the dataflow audit driven by *payload* and return a result dict.

    The payload is merged with config-file defaults, then drives analysis,
    optional report/DOT/synthesis/refactor artifact generation, and
    baseline ("ratchet") handling.  The response always carries
    "violations" and "exit_code"; other keys appear only when requested.
    For every *_path option the sentinel value "-" means "return the
    artifact in the response" instead of writing it to disk.
    """
    if payload is None:
        payload = {}
    # Workspace root: explicit payload value, else LSP workspace root, else CWD.
    root = payload.get("root") or ls.workspace.root_path or "."
    config_path = payload.get("config")
    defaults = dataflow_defaults(
        Path(root), Path(config_path) if config_path else None
    )
    # Combine caller payload with config defaults (precedence per merge_payload).
    payload = merge_payload(payload, defaults)

    raw_paths = payload.get("paths") or []
    if raw_paths:
        paths = [Path(p) for p in raw_paths]
    else:
        paths = [Path(root)]
    # Defaults may have contributed a root; re-read after the merge.
    root = payload.get("root") or root
    report_path = payload.get("report")
    dot_path = payload.get("dot")
    fail_on_violations = payload.get("fail_on_violations", False)
    no_recursive = payload.get("no_recursive", False)
    max_components = payload.get("max_components", 10)
    type_audit = payload.get("type_audit", False)
    type_audit_report = payload.get("type_audit_report", False)
    type_audit_max = payload.get("type_audit_max", 50)
    fail_on_type_ambiguities = payload.get("fail_on_type_ambiguities", False)
    exclude_dirs = set(payload.get("exclude", []))
    ignore_params = set(payload.get("ignore_params", []))
    allow_external = payload.get("allow_external", False)
    strictness = payload.get("strictness", "high")
    transparent_decorators = _normalize_transparent_decorators(
        payload.get("transparent_decorators")
    )
    baseline_path = resolve_baseline_path(payload.get("baseline"), Path(root))
    # Writing a baseline only makes sense when a baseline path resolved.
    baseline_write = bool(payload.get("baseline_write", False)) and baseline_path is not None
    synthesis_plan_path = payload.get("synthesis_plan")
    synthesis_report = payload.get("synthesis_report", False)
    synthesis_max_tier = payload.get("synthesis_max_tier", 2)
    synthesis_min_bundle_size = payload.get("synthesis_min_bundle_size", 2)
    synthesis_allow_singletons = payload.get("synthesis_allow_singletons", False)
    synthesis_protocols_path = payload.get("synthesis_protocols")
    synthesis_protocols_kind = payload.get("synthesis_protocols_kind", "dataclass")
    refactor_plan = payload.get("refactor_plan", False)
    refactor_plan_json = payload.get("refactor_plan_json")

    config = AuditConfig(
        project_root=Path(root),
        exclude_dirs=exclude_dirs,
        ignore_params=ignore_params,
        external_filter=not allow_external,
        strictness=strictness,
        transparent_decorators=transparent_decorators,
    )
    # Failing on ambiguities requires the type audit to actually run.
    if fail_on_type_ambiguities:
        type_audit = True
    analysis = analyze_paths(
        paths,
        recursive=not no_recursive,
        type_audit=type_audit or type_audit_report,
        type_audit_report=type_audit_report,
        type_audit_max=type_audit_max,
        # Smell collection is only needed when a report will be rendered.
        include_constant_smells=bool(report_path),
        include_unused_arg_smells=bool(report_path),
        config=config,
    )

    response: dict = {
        "type_suggestions": analysis.type_suggestions,
        "type_ambiguities": analysis.type_ambiguities,
        "unused_arg_smells": analysis.unused_arg_smells,
    }

    synthesis_plan: dict[str, object] | None = None
    if synthesis_plan_path or synthesis_report or synthesis_protocols_path:
        synthesis_plan = build_synthesis_plan(
            analysis.groups_by_path,
            project_root=Path(root),
            max_tier=int(synthesis_max_tier),
            min_bundle_size=int(synthesis_min_bundle_size),
            allow_singletons=bool(synthesis_allow_singletons),
            config=config,
        )
        if synthesis_plan_path:
            payload_json = json.dumps(synthesis_plan, indent=2, sort_keys=True)
            if synthesis_plan_path == "-":
                response["synthesis_plan"] = synthesis_plan
            else:
                Path(synthesis_plan_path).write_text(payload_json)
        if synthesis_protocols_path:
            stubs = render_protocol_stubs(
                synthesis_plan, kind=str(synthesis_protocols_kind)
            )
            if synthesis_protocols_path == "-":
                response["synthesis_protocols"] = stubs
            else:
                Path(synthesis_protocols_path).write_text(stubs)

    refactor_plan_payload: dict[str, object] | None = None
    if refactor_plan or refactor_plan_json:
        refactor_plan_payload = build_refactor_plan(
            analysis.groups_by_path,
            paths,
            config=config,
        )
        if refactor_plan_json:
            payload_json = json.dumps(refactor_plan_payload, indent=2, sort_keys=True)
            if refactor_plan_json == "-":
                response["refactor_plan"] = refactor_plan_payload
            else:
                Path(refactor_plan_json).write_text(payload_json)

    if dot_path:
        dot = render_dot(analysis.groups_by_path)
        if dot_path == "-":
            response["dot"] = dot
        else:
            Path(dot_path).write_text(dot)

    violations: list[str] = []
    # None means "no baseline applied"; resolved to raw violations at the end.
    effective_violations: list[str] | None = None
    if report_path:
        report, violations = render_report(
            analysis.groups_by_path,
            max_components,
            type_suggestions=analysis.type_suggestions if type_audit_report else None,
            type_ambiguities=analysis.type_ambiguities if type_audit_report else None,
            constant_smells=analysis.constant_smells,
            unused_arg_smells=analysis.unused_arg_smells,
        )
        if baseline_path is not None:
            baseline_entries = load_baseline(baseline_path)
            if baseline_write:
                # Ratchet reset: current violations become the new baseline.
                write_baseline(baseline_path, violations)
                baseline_entries = set(violations)
                effective_violations = []
            else:
                effective_violations, _ = apply_baseline(violations, baseline_entries)
            report = (
                report
                + "\n\nBaseline/Ratchet:\n```\n"
                + f"Baseline: {baseline_path}\n"
                + f"Baseline entries: {len(baseline_entries)}\n"
                + f"New violations: {len(effective_violations)}\n"
                + "```\n"
            )
        if synthesis_plan and (
            synthesis_report or synthesis_plan_path or synthesis_protocols_path
        ):
            report = report + render_synthesis_section(synthesis_plan)
        if refactor_plan_payload and (refactor_plan or refactor_plan_json):
            report = report + render_refactor_plan(refactor_plan_payload)
        Path(report_path).write_text(report)
    else:
        # No report requested: compute violations directly.
        violations = compute_violations(
            analysis.groups_by_path,
            max_components,
            type_suggestions=analysis.type_suggestions if type_audit_report else None,
            type_ambiguities=analysis.type_ambiguities if type_audit_report else None,
        )
        if baseline_path is not None:
            baseline_entries = load_baseline(baseline_path)
            if baseline_write:
                write_baseline(baseline_path, violations)
                effective_violations = []
            else:
                effective_violations, _ = apply_baseline(violations, baseline_entries)

    if effective_violations is None:
        effective_violations = violations
    response["violations"] = len(effective_violations)
    if baseline_path is not None:
        response["baseline_path"] = str(baseline_path)
        response["baseline_written"] = bool(baseline_write)
    # Exit-code policy: type ambiguities trump everything; a baseline write
    # always succeeds; otherwise violations fail only when requested.
    if fail_on_type_ambiguities and analysis.type_ambiguities:
        response["exit_code"] = 1
    else:
        if baseline_write:
            response["exit_code"] = 0
        else:
            response["exit_code"] = 1 if (fail_on_violations and effective_violations) else 0
    return response
|
|
301
|
+
|
|
302
|
+
|
|
303
|
+
@server.command(SYNTHESIS_COMMAND)
def execute_synthesis(ls: LanguageServer, payload: dict | None = None) -> dict:
    """Plan protocol synthesis from bundles supplied in *payload*.

    Validates the payload against SynthesisRequest; on validation failure a
    well-formed response carrying the error text is returned instead of
    raising.  Returns SynthesisResponse serialized via model_dump().
    """
    if payload is None:
        payload = {}
    try:
        request = SynthesisRequest.model_validate(payload)
    except Exception as exc:  # pydantic validation
        return {"protocols": [], "warnings": [], "errors": [str(exc)]}

    # Index each non-empty bundle by its (order-insensitive) member set.
    bundle_tiers: dict[frozenset[str], int] = {}
    for entry in request.bundles:
        bundle = entry.bundle
        if not bundle:
            continue
        bundle_tiers[frozenset(bundle)] = entry.tier

    field_types = request.field_types or {}
    config = SynthesisConfig(
        max_tier=request.max_tier,
        min_bundle_size=request.min_bundle_size,
        allow_singletons=request.allow_singletons,
    )
    naming_context = NamingContext(
        existing_names=set(request.existing_names),
        frequency=request.frequency or {},
        fallback_prefix=request.fallback_prefix,
    )
    plan = Synthesizer(config=config).plan(
        bundle_tiers=bundle_tiers,
        field_types=field_types,
        naming_context=naming_context,
    )
    # Convert plan specs to plain dicts; sorting makes output deterministic.
    response = SynthesisResponse(
        protocols=[
            {
                "name": spec.name,
                "fields": [
                    {
                        "name": field.name,
                        "type_hint": field.type_hint,
                        "source_params": sorted(field.source_params),
                    }
                    for field in spec.fields
                ],
                "bundle": sorted(spec.bundle),
                "tier": spec.tier,
                "rationale": spec.rationale,
            }
            for spec in plan.protocols
        ],
        warnings=plan.warnings,
        errors=plan.errors,
    )
    return response.model_dump()
|
|
357
|
+
|
|
358
|
+
|
|
359
|
+
@server.command(REFACTOR_COMMAND)
def execute_refactor(ls: LanguageServer, payload: dict | None = None) -> dict:
    """Plan a protocol-extraction refactor described by *payload*.

    Validates the payload against the schema-level RefactorRequest, bridges
    it to the engine-level RefactorRequestModel, and returns the planned
    edits as a serialized RefactorResponse.  Validation failures are
    reported in the response's "errors" list rather than raised.
    """
    if payload is None:
        payload = {}
    try:
        request = RefactorRequest.model_validate(payload)
    except Exception as exc:  # pydantic validation
        return RefactorResponse(errors=[str(exc)]).model_dump()

    # The engine resolves relative paths against the workspace root when known.
    project_root = None
    if ls.workspace.root_path:
        project_root = Path(ls.workspace.root_path)
    engine = RefactorEngine(project_root=project_root)
    plan = engine.plan_protocol_extraction(
        RefactorRequestModel(
            protocol_name=request.protocol_name,
            bundle=request.bundle,
            fields=[
                FieldSpec(name=field.name, type_hint=field.type_hint)
                for field in request.fields or []
            ],
            target_path=request.target_path,
            target_functions=request.target_functions,
            rationale=request.rationale,
        )
    )
    edits = [
        TextEditDTO(
            path=edit.path,
            start=edit.start,
            end=edit.end,
            replacement=edit.replacement,
        )
        for edit in plan.edits
    ]
    response = RefactorResponse(
        edits=edits,
        warnings=plan.warnings,
        errors=plan.errors,
    )
    return response.model_dump()
|
|
400
|
+
|
|
401
|
+
|
|
402
|
+
@server.feature(TEXT_DOCUMENT_CODE_ACTION)
def code_action(ls: LanguageServer, params: CodeActionParams) -> list[CodeAction]:
    """Offer a stub 'extract protocol' refactor action for the document.

    The action's command payload is a template the user fills in; the
    workspace edit itself is intentionally empty.
    """
    document_path = _uri_to_path(params.text_document.uri)
    title = "Gabion: Extract Protocol (stub)"
    arguments = [
        {
            "protocol_name": "TODO_Bundle",
            "bundle": [],
            "target_path": str(document_path),
            "target_functions": [],
            "rationale": "Stub code action; populate bundle details manually.",
        }
    ]
    action = CodeAction(
        title=title,
        kind=CodeActionKind.RefactorExtract,
        command=Command(title=title, command=REFACTOR_COMMAND, arguments=arguments),
        edit=WorkspaceEdit(changes={}),
    )
    return [action]
|
|
421
|
+
|
|
422
|
+
|
|
423
|
+
@server.feature(TEXT_DOCUMENT_DID_OPEN)
def did_open(ls: LanguageServer, params) -> None:
    """Publish bundle diagnostics for a document when it is opened."""
    opened_uri = params.text_document.uri
    document = ls.workspace.get_document(opened_uri)
    workspace_root = ls.workspace.root_path
    project_root = Path(workspace_root) if workspace_root else None
    ls.publish_diagnostics(
        opened_uri, _diagnostics_for_path(document.path, project_root)
    )
|
|
430
|
+
|
|
431
|
+
|
|
432
|
+
@server.feature(TEXT_DOCUMENT_DID_SAVE)
def did_save(ls: LanguageServer, params) -> None:
    """Re-publish bundle diagnostics for a document after it is saved."""
    saved_uri = params.text_document.uri
    document = ls.workspace.get_document(saved_uri)
    workspace_root = ls.workspace.root_path
    project_root = Path(workspace_root) if workspace_root else None
    ls.publish_diagnostics(
        saved_uri, _diagnostics_for_path(document.path, project_root)
    )
|
|
439
|
+
|
|
440
|
+
|
|
441
|
+
def start() -> None:
    """Start the language server (stub)."""
    # Serves LSP over stdio; blocks until the client disconnects.
    server.start_io()
|
|
444
|
+
|
|
445
|
+
|
|
446
|
+
if __name__ == "__main__":
    # Allow running this module directly as a script.
    start()
|
|
@@ -0,0 +1,26 @@
|
|
|
1
|
+
"""Synthesis subpackage for Gabion."""
|
|
2
|
+
|
|
3
|
+
from gabion.synthesis.merge import merge_bundles
|
|
4
|
+
from gabion.synthesis.model import (
|
|
5
|
+
FieldSpec,
|
|
6
|
+
NamingContext,
|
|
7
|
+
ProtocolSpec,
|
|
8
|
+
SynthesisConfig,
|
|
9
|
+
SynthesisPlan,
|
|
10
|
+
)
|
|
11
|
+
from gabion.synthesis.naming import suggest_name
|
|
12
|
+
from gabion.synthesis.protocols import Synthesizer
|
|
13
|
+
from gabion.synthesis.schedule import ScheduleResult, topological_schedule
|
|
14
|
+
|
|
15
|
+
__all__ = [
|
|
16
|
+
"FieldSpec",
|
|
17
|
+
"NamingContext",
|
|
18
|
+
"ProtocolSpec",
|
|
19
|
+
"ScheduleResult",
|
|
20
|
+
"SynthesisConfig",
|
|
21
|
+
"SynthesisPlan",
|
|
22
|
+
"Synthesizer",
|
|
23
|
+
"merge_bundles",
|
|
24
|
+
"suggest_name",
|
|
25
|
+
"topological_schedule",
|
|
26
|
+
]
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from typing import Iterable, List, Set
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
def _jaccard(left: Set[str], right: Set[str]) -> float:
|
|
7
|
+
if not left and not right:
|
|
8
|
+
return 1.0
|
|
9
|
+
union = left | right
|
|
10
|
+
if not union:
|
|
11
|
+
return 0.0
|
|
12
|
+
return len(left & right) / len(union)
|
|
13
|
+
|
|
14
|
+
|
|
15
|
+
def merge_bundles(
|
|
16
|
+
bundles: Iterable[Set[str]],
|
|
17
|
+
min_overlap: float = 0.75,
|
|
18
|
+
) -> List[Set[str]]:
|
|
19
|
+
merged: List[Set[str]] = [set(b) for b in bundles]
|
|
20
|
+
changed = True
|
|
21
|
+
while changed:
|
|
22
|
+
changed = False
|
|
23
|
+
merged.sort(key=lambda b: (len(b), sorted(b)))
|
|
24
|
+
result: List[Set[str]] = []
|
|
25
|
+
while merged:
|
|
26
|
+
current = merged.pop(0)
|
|
27
|
+
merged_any = False
|
|
28
|
+
for idx, other in enumerate(list(merged)):
|
|
29
|
+
if _jaccard(current, other) >= min_overlap:
|
|
30
|
+
current |= other
|
|
31
|
+
merged.pop(idx)
|
|
32
|
+
merged_any = True
|
|
33
|
+
changed = True
|
|
34
|
+
break
|
|
35
|
+
if merged_any:
|
|
36
|
+
merged.append(current)
|
|
37
|
+
else:
|
|
38
|
+
result.append(current)
|
|
39
|
+
merged = result
|
|
40
|
+
merged.sort(key=lambda b: (len(b), sorted(b)))
|
|
41
|
+
return merged
|
|
@@ -0,0 +1,41 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
from dataclasses import dataclass, field
|
|
4
|
+
from typing import Dict, List, Set
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
@dataclass(frozen=True)
class FieldSpec:
    """A synthesized protocol field and the source parameters it replaces."""

    name: str
    type_hint: str | None = None
    source_params: Set[str] = field(default_factory=set)


@dataclass(frozen=True)
class ProtocolSpec:
    """A planned protocol: its fields, the bundle it covers, and its tier."""

    name: str
    fields: List[FieldSpec]
    bundle: Set[str]
    tier: int
    rationale: str | None = None


@dataclass(frozen=True)
class SynthesisPlan:
    """Output of synthesis planning: protocols plus warnings/errors."""

    protocols: List[ProtocolSpec] = field(default_factory=list)
    warnings: List[str] = field(default_factory=list)
    errors: List[str] = field(default_factory=list)


@dataclass(frozen=True)
class NamingContext:
    """Inputs for name suggestion: taken names, token frequency, fallback prefix."""

    existing_names: Set[str] = field(default_factory=set)
    frequency: Dict[str, int] = field(default_factory=dict)
    fallback_prefix: str = "Bundle"


@dataclass(frozen=True)
class SynthesisConfig:
    """Tuning knobs for synthesis planning."""

    max_tier: int = 2
    min_bundle_size: int = 2
    allow_singletons: bool = False
|