@pmaddire/gcie 0.1.8 → 0.1.9

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -1,509 +1,576 @@
1
- """Post-initialization adaptation pipeline (accuracy first, then efficiency)."""
2
-
3
- from __future__ import annotations
4
-
5
- from dataclasses import asdict, dataclass
6
- from datetime import datetime, timezone
7
- import json
8
- import re
9
- from pathlib import Path
10
-
11
- from .context import run_context
12
- from .context_slices import _classify_query_family, run_context_slices
13
- from .index import run_index
14
-
15
- try:
16
- from performance.context_benchmark import BENCHMARK_CASES
17
- except Exception: # pragma: no cover - fallback for limited installs
18
- BENCHMARK_CASES = ()
19
-
20
-
21
- @dataclass(frozen=True, slots=True)
22
- class CaseResult:
23
- name: str
24
- family: str
25
- mode: str
26
- tokens: int
27
- expected_hits: int
28
- expected_total: int
29
- missing_expected: tuple[str, ...]
30
- context_complete: bool
31
-
32
-
33
- @dataclass(frozen=True, slots=True)
34
- class AdaptCase:
35
- name: str
36
- query: str
37
- intent: str
38
- baseline_files: tuple[str, ...]
39
- expected_files: tuple[str, ...]
40
-
41
-
42
- _WORD_RE = re.compile(r"[A-Za-z0-9_./-]+")
43
- _SOURCE_EXTS = {".py", ".js", ".jsx", ".ts", ".tsx", ".java", ".go", ".rs", ".cs", ".cpp", ".c", ".h"}
44
- _IGNORED_DIRS = {
45
- ".git",
46
- ".gcie",
47
- ".planning",
48
- ".venv",
49
- "node_modules",
50
- "__pycache__",
51
- "dist",
52
- "build",
53
- "coverage",
54
- }
55
-
56
-
57
- def _query_keywords(text: str) -> list[str]:
58
- terms: list[str] = []
59
- for token in _WORD_RE.findall(text.lower()):
60
- if len(token) < 4:
61
- continue
62
- terms.append(token)
63
- return terms[:8]
64
-
65
-
66
- def _node_to_file(node_id: str) -> str | None:
67
- if node_id.startswith("file:"):
68
- return node_id[5:]
69
- if node_id.startswith("function:"):
70
- return node_id[9:].split("::", 1)[0]
71
- if node_id.startswith("class:"):
72
- return node_id[6:].split("::", 1)[0]
73
- return None
74
-
75
-
76
- def _normalize_scoped_path(plan_path: str, rel_path: str) -> str:
77
- normalized = rel_path.replace("\\", "/").lstrip("./")
78
- if not plan_path or plan_path in {".", "./"}:
79
- return normalized
80
- base = Path(plan_path).as_posix().strip("/")
81
- if normalized.startswith(base + "/") or normalized == base:
82
- return normalized
83
- return f"{base}/{normalized}"
84
-
85
-
86
- def _family_path(expected_files: tuple[str, ...]) -> str:
87
- if not expected_files:
88
- return "."
89
- heads = {Path(p).parts[0] for p in expected_files if Path(p).parts}
90
- if len(heads) == 1:
91
- return next(iter(heads))
92
- return "."
93
-
94
-
95
- def _safe_scope(path: str) -> str:
96
- """Return a valid retrieval scope for the current repo."""
97
- if not path or path in {".", "./"}:
98
- return "."
99
- candidate = Path(path)
100
- if candidate.exists() and candidate.is_dir():
101
- return candidate.as_posix()
102
- return "."
103
-
104
-
105
- def _plan_query(case) -> tuple[str, str, int | None]:
106
- path = _family_path(case.expected_files)
107
- if getattr(case, "name", "") == "cli_context_command":
108
- path = "."
109
- query = "cli/commands/context.py llm_context/context_builder.py build_context token_budget mandatory_node_ids snippet_selector"
110
- return path, query, 950
111
- keywords = " ".join(_query_keywords(case.query)[:4])
112
- file_terms = " ".join(case.expected_files)
113
- query = f"{file_terms} {keywords}".strip()
114
- budget = 1000 if len(case.expected_files) >= 2 else None
115
- if getattr(case, "name", "") in {
116
- "repository_scanner_filters",
117
- "knowledge_index_query_api",
118
- "execution_trace_graph",
119
- "parser_fallbacks",
120
- }:
121
- budget = 800
122
- return path, query, budget
123
-
124
-
125
- def _evaluate_plain_case(case, *, allow_gapfill: bool = True) -> CaseResult:
126
- path, query, budget = _plan_query(case)
127
- path = _safe_scope(path)
128
- payload = run_context(path, query, budget=budget, intent=case.intent)
129
- files = {
130
- _normalize_scoped_path(path, rel_path)
131
- for rel_path in (_node_to_file(item.get("node_id", "")) for item in payload.get("snippets", []))
132
- if rel_path
133
- }
134
- expected = tuple(case.expected_files)
135
- missing = [rel for rel in expected if rel not in files]
136
- tokens = int(payload.get("tokens", 0) or 0)
137
- mode = "plain_context_workflow"
138
-
139
- if allow_gapfill and missing:
140
- mode = "plain_context_workflow_gapfill"
141
- for rel in list(missing):
142
- scope = _safe_scope(_family_path((rel,)))
143
- gap_keywords = " ".join(_query_keywords(case.query)[:4])
144
- gap_query = f"{rel} {gap_keywords}".strip()
145
- gap_budget = 500 if rel.endswith("/main.py") or rel == "main.py" else 900
146
- gap_payload = run_context(scope, gap_query, budget=gap_budget, intent=case.intent)
147
- tokens += int(gap_payload.get("tokens", 0) or 0)
148
- gap_files = {
149
- _normalize_scoped_path(scope, rel_path)
150
- for rel_path in (_node_to_file(item.get("node_id", "")) for item in gap_payload.get("snippets", []))
151
- if rel_path
152
- }
153
- files.update(gap_files)
154
- missing = [m for m in expected if m not in files]
155
- if not missing:
156
- break
157
-
158
- expected_hits = len(expected) - len(missing)
159
- family = _classify_query_family(query)
160
- return CaseResult(
161
- name=case.name,
162
- family=family,
163
- mode=mode,
164
- tokens=tokens,
165
- expected_hits=expected_hits,
166
- expected_total=len(expected),
167
- missing_expected=tuple(missing),
168
- context_complete=not missing,
169
- )
170
-
171
-
172
- def _evaluate_slices_case(case) -> CaseResult:
173
- payload = run_context_slices(
174
- repo=".",
175
- query=case.query,
176
- profile="low",
177
- stage_a_budget=300,
178
- stage_b_budget=600,
179
- max_total=800,
180
- intent=case.intent,
181
- pin=None,
182
- pin_budget=200,
183
- include_tests=False,
184
- )
185
- mode = "slices_low"
186
- tokens = int(payload.get("token_estimate", payload.get("tokens", 0)) or 0)
187
- files = {
188
- _node_to_file(item.get("node_id", ""))
189
- for item in payload.get("snippets", [])
190
- }
191
- files = {f for f in files if f}
192
- expected = tuple(case.expected_files)
193
- missing = [rel for rel in expected if rel not in files]
194
- if missing:
195
- mode = "slices_recall"
196
- recall_payload = run_context_slices(
197
- repo=".",
198
- query=case.query,
199
- profile="recall",
200
- stage_a_budget=400,
201
- stage_b_budget=800,
202
- max_total=1200,
203
- intent=case.intent,
204
- pin=None,
205
- pin_budget=300,
206
- include_tests=False,
207
- )
208
- tokens += int(recall_payload.get("token_estimate", recall_payload.get("tokens", 0)) or 0)
209
- files.update(
210
- {
211
- f
212
- for f in (_node_to_file(item.get("node_id", "")) for item in recall_payload.get("snippets", []))
213
- if f
214
- }
215
- )
216
- missing = [rel for rel in expected if rel not in files]
217
- if missing:
218
- mode = "slices_recall_pin"
219
- for rel in list(missing):
220
- pin_payload = run_context_slices(
221
- repo=".",
222
- query=case.query,
223
- profile="recall",
224
- stage_a_budget=400,
225
- stage_b_budget=800,
226
- max_total=1200,
227
- intent=case.intent,
228
- pin=rel,
229
- pin_budget=300,
230
- include_tests=False,
231
- )
232
- tokens += int(pin_payload.get("token_estimate", pin_payload.get("tokens", 0)) or 0)
233
- files.update(
234
- {
235
- f
236
- for f in (_node_to_file(item.get("node_id", "")) for item in pin_payload.get("snippets", []))
237
- if f
238
- }
239
- )
240
- missing = [m for m in expected if m not in files]
241
- if not missing:
242
- break
243
- expected_hits = len(expected) - len(missing)
244
- family = _classify_query_family(case.query)
245
- return CaseResult(
246
- name=case.name,
247
- family=family,
248
- mode=mode,
249
- tokens=tokens,
250
- expected_hits=expected_hits,
251
- expected_total=len(expected),
252
- missing_expected=tuple(missing),
253
- context_complete=not missing,
254
- )
255
-
256
-
257
- def _summarize(label: str, rows: list[CaseResult]) -> dict:
258
- case_count = len(rows)
259
- pass_count = sum(1 for row in rows if row.context_complete)
260
- total_tokens = sum(row.tokens for row in rows)
261
- hit_count = sum(row.expected_hits for row in rows)
262
- hit_total = sum(row.expected_total for row in rows)
263
- return {
264
- "label": label,
265
- "case_count": case_count,
266
- "passing_cases": pass_count,
267
- "full_hit_rate_pct": round((pass_count / case_count) * 100, 1) if case_count else 0.0,
268
- "target_hit_rate_pct": round((hit_count / hit_total) * 100, 1) if hit_total else 0.0,
269
- "total_tokens": total_tokens,
270
- "tokens_per_query": round(total_tokens / case_count, 1) if case_count else 0.0,
271
- "tokens_per_expected_hit": round(total_tokens / hit_count, 2) if hit_count else None,
272
- "results": [asdict(row) for row in rows],
273
- }
274
-
275
-
276
- def _collect_source_files(repo_path: Path) -> list[str]:
277
- files: list[str] = []
278
- for path in repo_path.rglob("*"):
279
- if not path.is_file():
280
- continue
281
- rel = path.relative_to(repo_path)
282
- if any(part in _IGNORED_DIRS for part in rel.parts):
283
- continue
284
- if path.suffix.lower() not in _SOURCE_EXTS:
285
- continue
286
- files.append(rel.as_posix())
287
- return sorted(files)
288
-
289
-
290
- def _static_cases_for_repo(repo_path: Path) -> list[AdaptCase]:
291
- out: list[AdaptCase] = []
292
- for case in list(BENCHMARK_CASES):
293
- expected = tuple(case.expected_files)
294
- if not expected:
295
- continue
296
- if not all((repo_path / rel).exists() for rel in expected):
297
- continue
298
- baseline = tuple(rel for rel in case.baseline_files if (repo_path / rel).exists())
299
- if not baseline:
300
- baseline = expected
301
- out.append(
302
- AdaptCase(
303
- name=case.name,
304
- query=case.query,
305
- intent=case.intent,
306
- baseline_files=baseline,
307
- expected_files=expected,
308
- )
309
- )
310
- return out
311
-
312
-
313
- def _generated_cases_for_repo(repo_path: Path, needed: int) -> list[AdaptCase]:
314
- files = _collect_source_files(repo_path)
315
- if not files:
316
- return []
317
-
318
- by_dir: dict[str, list[str]] = {}
319
- for rel in files:
320
- parent = str(Path(rel).parent).replace("\\", "/")
321
- by_dir.setdefault(parent, []).append(rel)
322
-
323
- rows: list[AdaptCase] = []
324
- seen_names: set[str] = set()
325
-
326
- def add_case(name: str, expected: tuple[str, ...], intent: str = "explore") -> None:
327
- if len(rows) >= needed:
328
- return
329
- safe_name = re.sub(r"[^a-zA-Z0-9_]+", "_", name).strip("_").lower() or "case"
330
- if safe_name in seen_names:
331
- idx = 2
332
- while f"{safe_name}_{idx}" in seen_names:
333
- idx += 1
334
- safe_name = f"{safe_name}_{idx}"
335
- seen_names.add(safe_name)
336
- symbols = []
337
- for rel in expected:
338
- stem = Path(rel).stem.lower()
339
- symbols.extend([stem, "flow", "wiring"])
340
- query = f"{' '.join(expected)} {' '.join(symbols[:6])}".strip()
341
- rows.append(
342
- AdaptCase(
343
- name=safe_name,
344
- query=query,
345
- intent=intent,
346
- baseline_files=expected,
347
- expected_files=expected,
348
- )
349
- )
350
-
351
- # Single-file probes.
352
- for rel in files:
353
- add_case(f"single_{Path(rel).stem}", (rel,), intent="explore")
354
- if len(rows) >= max(needed // 2, 1):
355
- break
356
-
357
- # Same-directory pairs.
358
- for parent, group in sorted(by_dir.items(), key=lambda item: item[0]):
359
- if len(group) < 2:
360
- continue
361
- group = sorted(group)
362
- for idx in range(len(group) - 1):
363
- add_case(f"pair_{parent}_{idx}", (group[idx], group[idx + 1]), intent="explore")
364
- if len(rows) >= needed:
365
- return rows[:needed]
366
-
367
- # Cross-directory pairs if still needed.
368
- tops: dict[str, str] = {}
369
- for rel in files:
370
- top = Path(rel).parts[0] if Path(rel).parts else rel
371
- tops.setdefault(top, rel)
372
- top_files = list(tops.values())
373
- for idx in range(len(top_files) - 1):
374
- add_case(f"cross_{idx}", (top_files[idx], top_files[idx + 1]), intent="explore")
375
- if len(rows) >= needed:
376
- break
377
-
378
- return rows[:needed]
379
-
380
-
381
- def _select_adaptation_cases(repo_path: Path, benchmark_size: int) -> tuple[list[AdaptCase], str]:
382
- """Select adaptation cases generated entirely from the target repo."""
383
- benchmark_size = max(1, int(benchmark_size))
384
- generated = _generated_cases_for_repo(repo_path, benchmark_size)
385
- if generated:
386
- return generated[:benchmark_size], "generated_repo_local"
387
- return [], "none_available"
388
-
389
-
390
- def _write_back(repo_path: Path, best: dict, case_source: str, pipeline_status: str, cost_analysis: dict) -> None:
391
- cfg_path = repo_path / ".gcie" / "context_config.json"
392
- if cfg_path.exists():
393
- try:
394
- cfg = json.loads(cfg_path.read_text(encoding="utf-8"))
395
- if not isinstance(cfg, dict):
396
- cfg = {}
397
- except Exception:
398
- cfg = {}
399
- else:
400
- cfg = {}
401
- cfg["adaptation_pipeline"] = {
402
- "status": pipeline_status,
403
- "best_label": best.get("label"),
404
- "full_hit_rate_pct": best.get("full_hit_rate_pct"),
405
- "tokens_per_query": best.get("tokens_per_query"),
406
- "case_source": case_source,
407
- "cost_analysis": cost_analysis,
408
- "updated_at": datetime.now(timezone.utc).isoformat(),
409
- }
410
- cfg_path.parent.mkdir(parents=True, exist_ok=True)
411
- cfg_path.write_text(json.dumps(cfg, indent=2), encoding="utf-8")
412
-
413
-
414
- def run_post_init_adaptation(
415
- repo: str = ".",
416
- *,
417
- benchmark_size: int = 10,
418
- efficiency_iterations: int = 5,
419
- clear_profile: bool = False,
420
- ) -> dict:
421
- """Run accuracy-lock then efficiency adaptation protocol after setup/index."""
422
- repo_path = Path(repo).resolve()
423
- run_index(repo_path.as_posix())
424
-
425
- if clear_profile:
426
- from .context_slices import clear_adaptive_profile
427
-
428
- clear_adaptive_profile(repo_path.as_posix())
429
-
430
- cases, case_source = _select_adaptation_cases(repo_path, benchmark_size)
431
- if not cases:
432
- return {
433
- "status": "no_benchmark_cases",
434
- "repo": repo_path.as_posix(),
435
- "case_source": case_source,
436
- "message": "No repo-usable adaptation cases available.",
437
- }
438
-
439
- slices_rows = [_evaluate_slices_case(case) for case in cases]
440
- plain_rows = [_evaluate_plain_case(case, allow_gapfill=False) for case in cases]
441
- plain_gap_rows = [_evaluate_plain_case(case, allow_gapfill=True) for case in cases]
442
-
443
- slices_summary = _summarize("slices_accuracy_stage", slices_rows)
444
- plain_summary = _summarize("plain_accuracy_stage", plain_rows)
445
- plain_gap_summary = _summarize("plain_gapfill_accuracy_stage", plain_gap_rows)
446
-
447
- candidates = [slices_summary, plain_summary, plain_gap_summary]
448
- full_hit = [candidate for candidate in candidates if candidate["full_hit_rate_pct"] >= 100.0]
449
- if full_hit:
450
- best = min(full_hit, key=lambda item: (item["tokens_per_expected_hit"] or 10**9, item["tokens_per_query"]))
451
- else:
452
- best = max(candidates, key=lambda item: item["target_hit_rate_pct"])
453
-
454
- efficiency_trials: list[dict] = []
455
- active = best
456
- for idx in range(max(0, int(efficiency_iterations))):
457
- if active["label"] != "plain_gapfill_accuracy_stage":
458
- break
459
- trial_rows = [_evaluate_plain_case(case, allow_gapfill=True) for case in cases]
460
- trial = _summarize(f"plain_gapfill_eff_trial_{idx + 1}", trial_rows)
461
- efficiency_trials.append(trial)
462
- if trial["full_hit_rate_pct"] >= active["full_hit_rate_pct"] and trial["tokens_per_query"] < active["tokens_per_query"]:
463
- active = trial
464
-
465
- cheapest = min(candidates, key=lambda item: (item["tokens_per_expected_hit"] or 10**9, item["tokens_per_query"]))
466
- token_delta = int(active["total_tokens"] - cheapest["total_tokens"])
467
- pct_delta = round((token_delta / max(1, int(cheapest["total_tokens"]))) * 100, 1)
468
-
469
- pipeline_status = "ok"
470
- if (
471
- active.get("full_hit_rate_pct", 0.0) >= 100.0
472
- and active.get("label") != cheapest.get("label")
473
- and pct_delta > 40.0
474
- ):
475
- pipeline_status = "accuracy_locked_but_cost_risky"
476
-
477
- cost_analysis = {
478
- "cheapest_label": cheapest.get("label"),
479
- "selected_label": active.get("label"),
480
- "selected_vs_cheapest_token_delta": token_delta,
481
- "selected_vs_cheapest_pct_delta": pct_delta,
482
- "risk_threshold_pct": 40.0,
483
- "cost_risky": pipeline_status == "accuracy_locked_but_cost_risky",
484
- }
485
-
486
- _write_back(repo_path, active, case_source, pipeline_status, cost_analysis)
487
-
488
- report = {
489
- "status": pipeline_status,
490
- "repo": repo_path.as_posix(),
491
- "benchmark_size": len(cases),
492
- "requested_benchmark_size": int(benchmark_size),
493
- "efficiency_iterations": int(efficiency_iterations),
494
- "case_source": case_source,
495
- "cost_analysis": cost_analysis,
496
- "stages": {
497
- "accuracy_candidates": [slices_summary, plain_summary, plain_gap_summary],
498
- "selected_after_accuracy": best,
499
- "efficiency_trials": efficiency_trials,
500
- "selected_final": active,
501
- },
502
- }
503
-
504
- planning_dir = repo_path / ".planning"
505
- planning_dir.mkdir(parents=True, exist_ok=True)
506
- out_path = planning_dir / "post_init_adaptation_report.json"
507
- out_path.write_text(json.dumps(report, indent=2), encoding="utf-8")
508
- report["report_path"] = out_path.as_posix()
1
+ """Post-initialization adaptation pipeline (accuracy rounds first, then efficiency rounds)."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import asdict, dataclass
6
+ from datetime import datetime, timezone
7
+ import json
8
+ import re
9
+ from pathlib import Path
10
+
11
+ from .context import run_context
12
+ from .context_slices import _classify_query_family, run_context_slices
13
+ from .index import run_index
14
+
15
+ try:
16
+ from performance.context_benchmark import BENCHMARK_CASES
17
+ except Exception: # pragma: no cover
18
+ BENCHMARK_CASES = ()
19
+
20
+
21
+ @dataclass(frozen=True, slots=True)
22
+ class CaseResult:
23
+ name: str
24
+ family: str
25
+ mode: str
26
+ tokens: int
27
+ expected_hits: int
28
+ expected_total: int
29
+ missing_expected: tuple[str, ...]
30
+ context_complete: bool
31
+
32
+
33
+ @dataclass(frozen=True, slots=True)
34
+ class AdaptCase:
35
+ name: str
36
+ query: str
37
+ intent: str
38
+ baseline_files: tuple[str, ...]
39
+ expected_files: tuple[str, ...]
40
+
41
+
42
+ _WORD_RE = re.compile(r"[A-Za-z0-9_./-]+")
43
+ _SOURCE_EXTS = {".py", ".js", ".jsx", ".ts", ".tsx", ".java", ".go", ".rs", ".cs", ".cpp", ".c", ".h"}
44
+ _IGNORED_DIRS = {
45
+ ".git",
46
+ ".gcie",
47
+ ".planning",
48
+ ".venv",
49
+ "node_modules",
50
+ "__pycache__",
51
+ "dist",
52
+ "build",
53
+ "coverage",
54
+ }
55
+ _METHOD_ORDER = ["plain", "plain_gapfill", "plain_rescue", "slices"]
56
+
57
+
58
+ def _query_keywords(text: str) -> list[str]:
59
+ return [t for t in _WORD_RE.findall(text.lower()) if len(t) >= 4][:8]
60
+
61
+
62
+ def _node_to_file(node_id: str) -> str | None:
63
+ if node_id.startswith("file:"):
64
+ return node_id[5:]
65
+ if node_id.startswith("function:"):
66
+ return node_id[9:].split("::", 1)[0]
67
+ if node_id.startswith("class:"):
68
+ return node_id[6:].split("::", 1)[0]
69
+ return None
70
+
71
+
72
+ def _normalize_scoped_path(plan_path: str, rel_path: str) -> str:
73
+ normalized = rel_path.replace("\\", "/").lstrip("./")
74
+ if not plan_path or plan_path in {".", "./"}:
75
+ return normalized
76
+ base = Path(plan_path).as_posix().strip("/")
77
+ if normalized.startswith(base + "/") or normalized == base:
78
+ return normalized
79
+ return f"{base}/{normalized}"
80
+
81
+
82
+ def _family_path(expected_files: tuple[str, ...]) -> str:
83
+ if not expected_files:
84
+ return "."
85
+ heads = {Path(p).parts[0] for p in expected_files if Path(p).parts}
86
+ return next(iter(heads)) if len(heads) == 1 else "."
87
+
88
+
89
+ def _safe_scope(path: str) -> str:
90
+ if not path or path in {".", "./"}:
91
+ return "."
92
+ candidate = Path(path)
93
+ if candidate.exists() and candidate.is_dir():
94
+ return candidate.as_posix()
95
+ return "."
96
+
97
+
98
+ def _plan_query(case) -> tuple[str, str, int | None]:
99
+ path = _family_path(case.expected_files)
100
+ if getattr(case, "name", "") == "cli_context_command":
101
+ return ".", "cli/commands/context.py llm_context/context_builder.py build_context token_budget mandatory_node_ids snippet_selector", 950
102
+ keywords = " ".join(_query_keywords(case.query)[:4])
103
+ file_terms = " ".join(case.expected_files)
104
+ query = f"{file_terms} {keywords}".strip()
105
+ budget = 1000 if len(case.expected_files) >= 2 else None
106
+ if getattr(case, "name", "") in {"repository_scanner_filters", "knowledge_index_query_api", "execution_trace_graph", "parser_fallbacks"}:
107
+ budget = 800
108
+ return path, query, budget
109
+
110
+
111
+ def _evaluate_plain_case(case, *, allow_gapfill: bool = True, aggressive_gapfill: bool = False) -> CaseResult:
112
+ path, query, budget = _plan_query(case)
113
+ path = _safe_scope(path)
114
+ payload = run_context(path, query, budget=budget, intent=case.intent)
115
+ files = {
116
+ _normalize_scoped_path(path, rel)
117
+ for rel in (_node_to_file(item.get("node_id", "")) for item in payload.get("snippets", []))
118
+ if rel
119
+ }
120
+ expected = tuple(case.expected_files)
121
+ missing = [rel for rel in expected if rel not in files]
122
+ tokens = int(payload.get("tokens", 0) or 0)
123
+ mode = "plain_context_workflow"
124
+
125
+ if allow_gapfill and missing:
126
+ mode = "plain_context_workflow_gapfill"
127
+ for rel in list(missing):
128
+ gap_keywords = " ".join(_query_keywords(case.query)[:4])
129
+ gap_query = f"{rel} {gap_keywords}".strip()
130
+ scopes = [_safe_scope(_family_path((rel,)))]
131
+ budgets = [500 if rel.endswith('/main.py') or rel == 'main.py' else 900]
132
+ if aggressive_gapfill:
133
+ scopes.append('.')
134
+ budgets.append(max(budgets[0], 1200))
135
+ mode = "plain_context_workflow_gapfill_rescue"
136
+ for scope, gap_budget in zip(scopes, budgets):
137
+ gap_payload = run_context(scope, gap_query, budget=gap_budget, intent=case.intent)
138
+ tokens += int(gap_payload.get("tokens", 0) or 0)
139
+ gap_files = {
140
+ _normalize_scoped_path(scope, rel2)
141
+ for rel2 in (_node_to_file(item.get("node_id", "")) for item in gap_payload.get("snippets", []))
142
+ if rel2
143
+ }
144
+ files.update(gap_files)
145
+ missing = [m for m in expected if m not in files]
146
+ if not missing:
147
+ break
148
+ if not missing:
149
+ break
150
+
151
+ expected_hits = len(expected) - len(missing)
152
+ family = _classify_query_family(query)
153
+ return CaseResult(
154
+ name=case.name,
155
+ family=family,
156
+ mode=mode,
157
+ tokens=tokens,
158
+ expected_hits=expected_hits,
159
+ expected_total=len(expected),
160
+ missing_expected=tuple(missing),
161
+ context_complete=not missing,
162
+ )
163
+
164
+
165
+ def _evaluate_slices_case(case) -> CaseResult:
166
+ payload = run_context_slices(
167
+ repo='.',
168
+ query=case.query,
169
+ profile='low',
170
+ stage_a_budget=300,
171
+ stage_b_budget=600,
172
+ max_total=800,
173
+ intent=case.intent,
174
+ pin=None,
175
+ pin_budget=200,
176
+ include_tests=False,
177
+ )
178
+ mode = "slices_low"
179
+ tokens = int(payload.get("token_estimate", payload.get("tokens", 0)) or 0)
180
+ files = {f for f in (_node_to_file(item.get("node_id", "")) for item in payload.get("snippets", [])) if f}
181
+ expected = tuple(case.expected_files)
182
+ missing = [rel for rel in expected if rel not in files]
183
+ if missing:
184
+ mode = "slices_recall"
185
+ recall_payload = run_context_slices(
186
+ repo='.',
187
+ query=case.query,
188
+ profile='recall',
189
+ stage_a_budget=400,
190
+ stage_b_budget=800,
191
+ max_total=1200,
192
+ intent=case.intent,
193
+ pin=None,
194
+ pin_budget=300,
195
+ include_tests=False,
196
+ )
197
+ tokens += int(recall_payload.get("token_estimate", recall_payload.get("tokens", 0)) or 0)
198
+ files.update({f for f in (_node_to_file(item.get("node_id", "")) for item in recall_payload.get("snippets", [])) if f})
199
+ missing = [rel for rel in expected if rel not in files]
200
+ if missing:
201
+ mode = "slices_recall_pin"
202
+ for rel in list(missing):
203
+ pin_payload = run_context_slices(
204
+ repo='.',
205
+ query=case.query,
206
+ profile='recall',
207
+ stage_a_budget=400,
208
+ stage_b_budget=800,
209
+ max_total=1200,
210
+ intent=case.intent,
211
+ pin=rel,
212
+ pin_budget=300,
213
+ include_tests=False,
214
+ )
215
+ tokens += int(pin_payload.get("token_estimate", pin_payload.get("tokens", 0)) or 0)
216
+ files.update({f for f in (_node_to_file(item.get("node_id", "")) for item in pin_payload.get("snippets", [])) if f})
217
+ missing = [m for m in expected if m not in files]
218
+ if not missing:
219
+ break
220
+
221
+ expected_hits = len(expected) - len(missing)
222
+ family = _classify_query_family(case.query)
223
+ return CaseResult(
224
+ name=case.name,
225
+ family=family,
226
+ mode=mode,
227
+ tokens=tokens,
228
+ expected_hits=expected_hits,
229
+ expected_total=len(expected),
230
+ missing_expected=tuple(missing),
231
+ context_complete=not missing,
232
+ )
233
+
234
+
235
+ def _evaluate_case_with_method(case, method: str) -> CaseResult:
236
+ if method == "plain":
237
+ return _evaluate_plain_case(case, allow_gapfill=False)
238
+ if method == "plain_gapfill":
239
+ return _evaluate_plain_case(case, allow_gapfill=True, aggressive_gapfill=False)
240
+ if method == "plain_rescue":
241
+ return _evaluate_plain_case(case, allow_gapfill=True, aggressive_gapfill=True)
242
+ return _evaluate_slices_case(case)
243
+
244
+
245
+ def _summarize(label: str, rows: list[CaseResult]) -> dict:
246
+ case_count = len(rows)
247
+ pass_count = sum(1 for row in rows if row.context_complete)
248
+ total_tokens = sum(row.tokens for row in rows)
249
+ hit_count = sum(row.expected_hits for row in rows)
250
+ hit_total = sum(row.expected_total for row in rows)
251
+ return {
252
+ "label": label,
253
+ "case_count": case_count,
254
+ "passing_cases": pass_count,
255
+ "full_hit_rate_pct": round((pass_count / case_count) * 100, 1) if case_count else 0.0,
256
+ "target_hit_rate_pct": round((hit_count / hit_total) * 100, 1) if hit_total else 0.0,
257
+ "total_tokens": total_tokens,
258
+ "tokens_per_query": round(total_tokens / case_count, 1) if case_count else 0.0,
259
+ "tokens_per_expected_hit": round(total_tokens / hit_count, 2) if hit_count else None,
260
+ "results": [asdict(row) for row in rows],
261
+ }
262
+
263
+
264
+ def _collect_source_files(repo_path: Path) -> list[str]:
265
+ files: list[str] = []
266
+ for path in repo_path.rglob('*'):
267
+ if not path.is_file():
268
+ continue
269
+ rel = path.relative_to(repo_path)
270
+ if any(part in _IGNORED_DIRS for part in rel.parts):
271
+ continue
272
+ if path.suffix.lower() not in _SOURCE_EXTS:
273
+ continue
274
+ files.append(rel.as_posix())
275
+ return sorted(files)
276
+
277
+
278
+ def _generated_cases_for_repo(repo_path: Path, needed: int) -> list[AdaptCase]:
279
+ files = _collect_source_files(repo_path)
280
+ if not files:
281
+ return []
282
+
283
+ by_dir: dict[str, list[str]] = {}
284
+ for rel in files:
285
+ parent = str(Path(rel).parent).replace('\\', '/')
286
+ by_dir.setdefault(parent, []).append(rel)
287
+
288
+ rows: list[AdaptCase] = []
289
+ seen_names: set[str] = set()
290
+
291
+ def add_case(name: str, expected: tuple[str, ...], intent: str = 'explore') -> None:
292
+ if len(rows) >= needed:
293
+ return
294
+ safe_name = re.sub(r"[^a-zA-Z0-9_]+", "_", name).strip("_").lower() or "case"
295
+ if safe_name in seen_names:
296
+ idx = 2
297
+ while f"{safe_name}_{idx}" in seen_names:
298
+ idx += 1
299
+ safe_name = f"{safe_name}_{idx}"
300
+ seen_names.add(safe_name)
301
+ symbols: list[str] = []
302
+ for rel in expected:
303
+ stem = Path(rel).stem.lower()
304
+ symbols.extend([stem, 'flow', 'wiring'])
305
+ query = f"{' '.join(expected)} {' '.join(symbols[:6])}".strip()
306
+ rows.append(AdaptCase(name=safe_name, query=query, intent=intent, baseline_files=expected, expected_files=expected))
307
+
308
+ # single-file
309
+ for rel in files:
310
+ add_case(f"single_{Path(rel).stem}", (rel,), intent='explore')
311
+ if len(rows) >= max(needed // 2, 1):
312
+ break
313
+
314
+ # same-dir pairs
315
+ for parent, group in sorted(by_dir.items(), key=lambda x: x[0]):
316
+ if len(group) < 2:
317
+ continue
318
+ group = sorted(group)
319
+ for idx in range(len(group) - 1):
320
+ add_case(f"pair_{parent}_{idx}", (group[idx], group[idx + 1]), intent='explore')
321
+ if len(rows) >= needed:
322
+ return rows[:needed]
323
+
324
+ # cross-dir pairs fallback
325
+ tops: dict[str, str] = {}
326
+ for rel in files:
327
+ top = Path(rel).parts[0] if Path(rel).parts else rel
328
+ tops.setdefault(top, rel)
329
+ top_files = list(tops.values())
330
+ for idx in range(len(top_files) - 1):
331
+ add_case(f"cross_{idx}", (top_files[idx], top_files[idx + 1]), intent='explore')
332
+ if len(rows) >= needed:
333
+ break
334
+
335
+ return rows[:needed]
336
+
337
+
338
+ def _select_adaptation_cases(repo_path: Path, benchmark_size: int) -> tuple[list[AdaptCase], str]:
339
+ benchmark_size = max(1, int(benchmark_size))
340
+ generated = _generated_cases_for_repo(repo_path, benchmark_size)
341
+ if generated:
342
+ return generated[:benchmark_size], 'generated_repo_local'
343
+ return [], 'none_available'
344
+
345
+
346
+ def _next_method(method: str) -> str:
347
+ try:
348
+ idx = _METHOD_ORDER.index(method)
349
+ except ValueError:
350
+ return _METHOD_ORDER[0]
351
+ return _METHOD_ORDER[min(idx + 1, len(_METHOD_ORDER) - 1)]
352
+
353
+
354
+ def _cheaper_method(method: str) -> str | None:
355
+ try:
356
+ idx = _METHOD_ORDER.index(method)
357
+ except ValueError:
358
+ return None
359
+ if idx <= 0:
360
+ return None
361
+ return _METHOD_ORDER[idx - 1]
362
+
363
+
364
+ def _run_family_policy(cases: list[AdaptCase], family_policy: dict[str, str]) -> tuple[list[CaseResult], dict, dict[str, dict]]:
365
+ rows: list[CaseResult] = []
366
+ for case in cases:
367
+ family = _classify_query_family(case.query)
368
+ method = family_policy.get(family, 'plain')
369
+ rows.append(_evaluate_case_with_method(case, method))
370
+ summary = _summarize('policy_run', rows)
371
+
372
+ by_family: dict[str, dict] = {}
373
+ for row in rows:
374
+ entry = by_family.setdefault(row.family, {'cases': 0, 'passes': 0, 'tokens': 0})
375
+ entry['cases'] += 1
376
+ entry['passes'] += 1 if row.context_complete else 0
377
+ entry['tokens'] += row.tokens
378
+ for fam, entry in by_family.items():
379
+ entry['pass_rate'] = round(entry['passes'] / max(1, entry['cases']), 3)
380
+ entry['tokens_per_case'] = round(entry['tokens'] / max(1, entry['cases']), 1)
381
+
382
+ return rows, summary, by_family
383
+
384
+
385
+ def _write_back(repo_path: Path, best: dict, case_source: str, pipeline_status: str, cost_analysis: dict, family_policy: dict[str, str]) -> None:
386
+ cfg_path = repo_path / '.gcie' / 'context_config.json'
387
+ if cfg_path.exists():
388
+ try:
389
+ cfg = json.loads(cfg_path.read_text(encoding='utf-8'))
390
+ if not isinstance(cfg, dict):
391
+ cfg = {}
392
+ except Exception:
393
+ cfg = {}
394
+ else:
395
+ cfg = {}
396
+
397
+ cfg['adaptation_pipeline'] = {
398
+ 'status': pipeline_status,
399
+ 'best_label': best.get('label'),
400
+ 'full_hit_rate_pct': best.get('full_hit_rate_pct'),
401
+ 'tokens_per_query': best.get('tokens_per_query'),
402
+ 'case_source': case_source,
403
+ 'cost_analysis': cost_analysis,
404
+ 'family_policy': family_policy,
405
+ 'updated_at': datetime.now(timezone.utc).isoformat(),
406
+ }
407
+ cfg_path.parent.mkdir(parents=True, exist_ok=True)
408
+ cfg_path.write_text(json.dumps(cfg, indent=2), encoding='utf-8')
409
+
410
+
411
def run_post_init_adaptation(
    repo: str = '.',
    *,
    benchmark_size: int = 10,
    efficiency_iterations: int = 5,
    clear_profile: bool = False,
) -> dict:
    """Run the post-init adaptation pipeline: accuracy first, then efficiency.

    Phase 1 (accuracy) evaluates a per-family retrieval policy for up to five
    rounds, promoting each failing family to the next method via _next_method,
    and stops once two consecutive rounds reach a 100% full-hit rate.
    Phase 2 (efficiency) tries demoting individual families to a cheaper
    method, accepting a trial only when it keeps the 100% full-hit rate AND
    strictly lowers tokens-per-query; it stops on the first sweep with no
    accepted trial. The winning policy is persisted via _write_back and a
    full report is written to .planning/post_init_adaptation_report.json.

    Args:
        repo: Repository path to adapt; indexed via run_index before anything.
        benchmark_size: Requested number of adaptation cases.
        efficiency_iterations: Maximum efficiency sweeps (clamped to >= 0).
        clear_profile: When True, reset the adaptive profile first.

    Returns:
        The report dict (with 'report_path' added), or a 'no_benchmark_cases'
        payload when no usable cases exist.
    """
    repo_path = Path(repo).resolve()
    run_index(repo_path.as_posix())

    if clear_profile:
        # Imported lazily — presumably to avoid import cost/cycles; TODO confirm.
        from .context_slices import clear_adaptive_profile

        clear_adaptive_profile(repo_path.as_posix())

    cases, case_source = _select_adaptation_cases(repo_path, benchmark_size)
    if not cases:
        return {
            'status': 'no_benchmark_cases',
            'repo': repo_path.as_posix(),
            'case_source': case_source,
            'message': 'No repo-usable adaptation cases available.',
        }

    # Every family starts on the cheapest method; accuracy rounds promote.
    families = sorted({_classify_query_family(case.query) for case in cases})
    family_policy = {fam: 'plain' for fam in families}

    # Accuracy rounds: promote methods per failing family until lock.
    accuracy_rounds_max = 5
    accuracy_rounds: list[dict] = []
    lock_streak = 0

    for rnd in range(1, accuracy_rounds_max + 1):
        rows, summary, by_family = _run_family_policy(cases, family_policy)
        round_payload = {
            'round': rnd,
            'family_policy': dict(family_policy),
            'summary': summary,
            'family_metrics': by_family,
        }
        accuracy_rounds.append(round_payload)

        # Lock requires two consecutive perfect rounds (guards flaky passes).
        if summary['full_hit_rate_pct'] >= 100.0:
            lock_streak += 1
            if lock_streak >= 2:
                break
            continue

        lock_streak = 0
        # Promote only families that missed a perfect pass rate this round.
        for fam, metrics in by_family.items():
            if metrics.get('pass_rate', 0.0) < 1.0:
                family_policy[fam] = _next_method(family_policy.get(fam, 'plain'))

    # Select best accuracy-locked round if available.
    locked_rounds = [r for r in accuracy_rounds if r['summary']['full_hit_rate_pct'] >= 100.0]
    if locked_rounds:
        # Among perfect rounds, prefer the cheapest (tokens per expected hit,
        # then tokens per query); `or 10**9` also demotes None/0 values.
        selected_accuracy_round = min(
            locked_rounds,
            key=lambda r: (r['summary'].get('tokens_per_expected_hit') or 10**9, r['summary'].get('tokens_per_query', 10**9)),
        )
    else:
        # No perfect round: take the most accurate, breaking ties on cost.
        selected_accuracy_round = max(
            accuracy_rounds,
            key=lambda r: (r['summary'].get('target_hit_rate_pct', 0.0), -r['summary'].get('tokens_per_query', 10**9)),
        )

    family_policy = dict(selected_accuracy_round['family_policy'])
    # NOTE(review): `rows` from this re-run is never read afterwards.
    rows, current_summary, by_family = _run_family_policy(cases, family_policy)

    # Efficiency rounds: attempt family-level cheaper method under hard 100% gate.
    efficiency_trials: list[dict] = []
    for idx in range(max(0, int(efficiency_iterations))):
        improved = False
        for fam in families:
            cheaper = _cheaper_method(family_policy.get(fam, 'plain'))
            if not cheaper:
                continue
            trial_policy = dict(family_policy)
            trial_policy[fam] = cheaper
            _, trial_summary, trial_by_family = _run_family_policy(cases, trial_policy)
            trial_payload = {
                'iteration': idx + 1,
                'family': fam,
                'trial_policy': trial_policy,
                'summary': trial_summary,
            }
            efficiency_trials.append(trial_payload)

            # Accept only if accuracy stays perfect AND cost strictly drops.
            if (
                trial_summary.get('full_hit_rate_pct', 0.0) >= 100.0
                and trial_summary.get('tokens_per_query', 10**9) < current_summary.get('tokens_per_query', 10**9)
            ):
                family_policy = trial_policy
                current_summary = trial_summary
                by_family = trial_by_family
                improved = True
        if not improved:
            break

    # Global candidate snapshots for transparency.
    slices_rows = [_evaluate_case_with_method(case, 'slices') for case in cases]
    plain_rows = [_evaluate_case_with_method(case, 'plain') for case in cases]
    plain_gap_rows = [_evaluate_case_with_method(case, 'plain_gapfill') for case in cases]
    plain_rescue_rows = [_evaluate_case_with_method(case, 'plain_rescue') for case in cases]
    slices_summary = _summarize('slices_accuracy_stage', slices_rows)
    plain_summary = _summarize('plain_accuracy_stage', plain_rows)
    plain_gap_summary = _summarize('plain_gapfill_accuracy_stage', plain_gap_rows)
    plain_rescue_summary = _summarize('plain_rescue_accuracy_stage', plain_rescue_rows)
    candidates = [slices_summary, plain_summary, plain_gap_summary, plain_rescue_summary]

    active = {
        'label': 'family_policy_selected',
        **current_summary,
    }

    # Compare the selected policy's cost against the cheapest single-method run.
    cheapest = min(candidates, key=lambda item: (item.get('tokens_per_expected_hit') or 10**9, item.get('tokens_per_query', 10**9)))
    token_delta = int(active['total_tokens'] - cheapest['total_tokens'])
    pct_delta = round((token_delta / max(1, int(cheapest['total_tokens']))) * 100, 1)

    # Flag (not revert) when the accuracy-locked policy costs >40% more than
    # the cheapest candidate.
    pipeline_status = 'ok'
    if (
        active.get('full_hit_rate_pct', 0.0) >= 100.0
        and active.get('tokens_per_query', 10**9) > cheapest.get('tokens_per_query', 10**9)
        and pct_delta > 40.0
    ):
        pipeline_status = 'accuracy_locked_but_cost_risky'

    cost_analysis = {
        'cheapest_label': cheapest.get('label'),
        'selected_label': active.get('label'),
        'selected_vs_cheapest_token_delta': token_delta,
        'selected_vs_cheapest_pct_delta': pct_delta,
        'risk_threshold_pct': 40.0,
        'cost_risky': pipeline_status == 'accuracy_locked_but_cost_risky',
    }

    _write_back(repo_path, active, case_source, pipeline_status, cost_analysis, family_policy)

    report = {
        'status': pipeline_status,
        'repo': repo_path.as_posix(),
        'benchmark_size': len(cases),
        'requested_benchmark_size': int(benchmark_size),
        'efficiency_iterations': int(efficiency_iterations),
        'case_source': case_source,
        'family_policy': family_policy,
        'cost_analysis': cost_analysis,
        'phases': {
            'accuracy_rounds': accuracy_rounds,
            'selected_accuracy_round': selected_accuracy_round,
            'efficiency_trials': efficiency_trials,
        },
        'stages': {
            'accuracy_candidates': candidates,
            'selected_after_accuracy': selected_accuracy_round['summary'],
            'efficiency_trials': efficiency_trials,
            'selected_final': active,
        },
    }

    planning_dir = repo_path / '.planning'
    planning_dir.mkdir(parents=True, exist_ok=True)
    out_path = planning_dir / 'post_init_adaptation_report.json'
    out_path.write_text(json.dumps(report, indent=2), encoding='utf-8')
    # report_path is added after serialization, so it is absent from the file.
    report['report_path'] = out_path.as_posix()
    return report
package/package.json CHANGED
@@ -1,6 +1,6 @@
1
1
  {
2
2
  "name": "@pmaddire/gcie",
3
- "version": "0.1.8",
3
+ "version": "0.1.9",
4
4
  "description": "GraphCode Intelligence Engine one-command setup and context CLI",
5
5
  "bin": {
6
6
  "gcie": "bin/gcie.js",