abstractgateway 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractgateway/__init__.py +1 -2
- abstractgateway/__main__.py +7 -0
- abstractgateway/app.py +4 -4
- abstractgateway/cli.py +568 -8
- abstractgateway/config.py +15 -5
- abstractgateway/embeddings_config.py +45 -0
- abstractgateway/host_metrics.py +274 -0
- abstractgateway/hosts/bundle_host.py +528 -55
- abstractgateway/hosts/visualflow_host.py +30 -3
- abstractgateway/integrations/__init__.py +2 -0
- abstractgateway/integrations/email_bridge.py +782 -0
- abstractgateway/integrations/telegram_bridge.py +534 -0
- abstractgateway/maintenance/__init__.py +5 -0
- abstractgateway/maintenance/action_tokens.py +100 -0
- abstractgateway/maintenance/backlog_exec_runner.py +1592 -0
- abstractgateway/maintenance/backlog_parser.py +184 -0
- abstractgateway/maintenance/draft_generator.py +451 -0
- abstractgateway/maintenance/llm_assist.py +212 -0
- abstractgateway/maintenance/notifier.py +109 -0
- abstractgateway/maintenance/process_manager.py +1064 -0
- abstractgateway/maintenance/report_models.py +81 -0
- abstractgateway/maintenance/report_parser.py +219 -0
- abstractgateway/maintenance/text_similarity.py +123 -0
- abstractgateway/maintenance/triage.py +507 -0
- abstractgateway/maintenance/triage_queue.py +142 -0
- abstractgateway/migrate.py +155 -0
- abstractgateway/routes/__init__.py +2 -2
- abstractgateway/routes/gateway.py +10817 -179
- abstractgateway/routes/triage.py +118 -0
- abstractgateway/runner.py +689 -14
- abstractgateway/security/gateway_security.py +425 -110
- abstractgateway/service.py +213 -6
- abstractgateway/stores.py +64 -4
- abstractgateway/workflow_deprecations.py +225 -0
- abstractgateway-0.1.1.dist-info/METADATA +135 -0
- abstractgateway-0.1.1.dist-info/RECORD +40 -0
- abstractgateway-0.1.0.dist-info/METADATA +0 -101
- abstractgateway-0.1.0.dist-info/RECORD +0 -18
- {abstractgateway-0.1.0.dist-info → abstractgateway-0.1.1.dist-info}/WHEEL +0 -0
- {abstractgateway-0.1.0.dist-info → abstractgateway-0.1.1.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,507 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import datetime
|
|
4
|
+
import os
|
|
5
|
+
import re
|
|
6
|
+
from dataclasses import asdict
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
9
|
+
|
|
10
|
+
from .backlog_parser import BacklogItem, iter_backlog_items
|
|
11
|
+
from .draft_generator import BacklogIdAllocator, write_backlog_draft
|
|
12
|
+
from .llm_assist import llm_assist, load_llm_assist_config
|
|
13
|
+
from .report_models import ReportRecord, SimilarityCandidate, TriageDecision
|
|
14
|
+
from .report_parser import parse_report_file
|
|
15
|
+
from .text_similarity import top_k_similar
|
|
16
|
+
from .triage_queue import decision_id_for_report, decisions_dir, iter_decisions, upsert_decision, load_decision, save_decision
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def _now_utc_iso() -> str:
|
|
20
|
+
return datetime.datetime.now(datetime.timezone.utc).isoformat()
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def _env(name: str, fallback: Optional[str] = None) -> Optional[str]:
|
|
24
|
+
v = os.getenv(name)
|
|
25
|
+
if v is not None and str(v).strip():
|
|
26
|
+
return str(v).strip()
|
|
27
|
+
if fallback:
|
|
28
|
+
v2 = os.getenv(fallback)
|
|
29
|
+
if v2 is not None and str(v2).strip():
|
|
30
|
+
return str(v2).strip()
|
|
31
|
+
return None
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def find_repo_root(start: Path) -> Optional[Path]:
    """Walk upward from *start* to the first directory containing docs/backlog/README.md."""
    base = Path(start).expanduser().resolve()
    marker = Path("docs") / "backlog" / "README.md"
    for candidate in (base, *base.parents):
        if (candidate / marker).exists():
            return candidate
    return None
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
def _scan_reports_in_dir(dir_path: Path, *, report_type: str) -> List[ReportRecord]:
    """Parse every *.md report in *dir_path* (skipping template.md) as *report_type*."""
    records: List[ReportRecord] = []
    if not (dir_path.exists() and dir_path.is_dir()):
        return records
    for md_path in sorted(dir_path.glob("*.md")):
        if md_path.name == "template.md":
            continue
        try:
            parsed = parse_report_file(md_path)
        except Exception:
            # Unparseable files are skipped rather than aborting the whole scan.
            continue
        # The containing folder is authoritative for the report type, even when
        # the file header disagrees.
        if report_type in {"bug", "feature"}:
            parsed = ReportRecord(
                report_type=report_type,  # type: ignore[arg-type]
                path=parsed.path,
                header=parsed.header,
                description=parsed.description,
                sections=parsed.sections,
                context=parsed.context,
            )
        records.append(parsed)
    return records
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def scan_gateway_reports(*, gateway_data_dir: Path) -> List[ReportRecord]:
    """Collect all bug reports and feature requests stored under *gateway_data_dir*."""
    root = Path(gateway_data_dir).expanduser().resolve()
    return (
        _scan_reports_in_dir(root / "bug_reports", report_type="bug")
        + _scan_reports_in_dir(root / "feature_requests", report_type="feature")
    )
|
|
72
|
+
|
|
73
|
+
|
|
74
|
+
def _content_missing_bug(report: ReportRecord, section_name: str) -> bool:
|
|
75
|
+
raw = report.sections.get(section_name) or ""
|
|
76
|
+
text = raw.strip()
|
|
77
|
+
if not text:
|
|
78
|
+
return True
|
|
79
|
+
|
|
80
|
+
low = text.lower()
|
|
81
|
+
if section_name == "Impact":
|
|
82
|
+
return ("who is affected" in low) and ("how bad is it" in low)
|
|
83
|
+
if section_name == "Steps to Reproduce":
|
|
84
|
+
# Template placeholders: "1.\n2."
|
|
85
|
+
nonempty = [ln.strip() for ln in text.splitlines() if ln.strip()]
|
|
86
|
+
if not nonempty:
|
|
87
|
+
return True
|
|
88
|
+
if all(re.match(r"^\d+\.\s*$", ln) for ln in nonempty): # type: ignore[name-defined]
|
|
89
|
+
return True
|
|
90
|
+
return False
|
|
91
|
+
if section_name == "Workaround":
|
|
92
|
+
return low in {"(if any)", "if any", "(none)"} or "(if any)" in low
|
|
93
|
+
if section_name in {"Expected Behavior", "Actual Behavior", "Notes / Hypotheses"}:
|
|
94
|
+
return not bool(text.strip())
|
|
95
|
+
if section_name in {"Reproducibility", "Severity"}:
|
|
96
|
+
# If the user didn't tick anything, all lines remain unchecked.
|
|
97
|
+
return "- [x]" not in low and "- [X]" not in text
|
|
98
|
+
return False
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def _content_missing_feature(report: ReportRecord, section_name: str) -> bool:
|
|
102
|
+
raw = report.sections.get(section_name) or ""
|
|
103
|
+
text = raw.strip()
|
|
104
|
+
if not text:
|
|
105
|
+
return True
|
|
106
|
+
low = text.lower()
|
|
107
|
+
if section_name == "Problem / Motivation":
|
|
108
|
+
return ("what is painful today" in low) and ("who needs this" in low)
|
|
109
|
+
if section_name == "Proposed Solution":
|
|
110
|
+
return ("what should the system do" in low) and ("ux expectations" in low)
|
|
111
|
+
if section_name == "Acceptance Criteria":
|
|
112
|
+
return "(clear, testable outcomes)" in low or text.strip() == "- [ ]"
|
|
113
|
+
if section_name == "Scope":
|
|
114
|
+
# If both Included/Excluded remain blank bullets.
|
|
115
|
+
return "\n- \n" in (text.replace("\r\n", "\n") + "\n")
|
|
116
|
+
return False
|
|
117
|
+
|
|
118
|
+
|
|
119
|
+
def compute_missing_fields(report: ReportRecord) -> List[str]:
    """Return human-readable labels for the sections of *report* still unfilled."""
    if report.report_type == "bug":
        spec = [
            ("Impact", "Impact not filled"),
            ("Steps to Reproduce", "Steps to reproduce missing"),
            ("Expected Behavior", "Expected behavior missing"),
            ("Actual Behavior", "Actual behavior missing"),
            ("Reproducibility", "Reproducibility not specified"),
            ("Severity", "Severity not specified"),
            ("Workaround", "Workaround not specified"),
        ]
        is_missing = _content_missing_bug
    else:
        spec = [
            ("Problem / Motivation", "Problem/motivation not filled"),
            ("Proposed Solution", "Proposed solution not filled"),
            ("Acceptance Criteria", "Acceptance criteria missing"),
        ]
        is_missing = _content_missing_feature
    return [label for section, label in spec if is_missing(report, section)]
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def _backlog_roots(repo_root: Path) -> Tuple[Path, Path, Path]:
|
|
147
|
+
backlog_root = repo_root / "docs" / "backlog"
|
|
148
|
+
return backlog_root / "planned", backlog_root / "completed", backlog_root / "proposed"
|
|
149
|
+
|
|
150
|
+
|
|
151
|
+
def scan_backlog(repo_root: Path) -> Tuple[List[BacklogItem], List[BacklogItem], List[BacklogItem]]:
    """Load the (planned, completed, proposed) backlog items from the repo tree."""
    planned_dir, completed_dir, proposed_dir = _backlog_roots(repo_root)
    return (
        list(iter_backlog_items(planned_dir, kind="planned")),
        list(iter_backlog_items(completed_dir, kind="completed")),
        list(iter_backlog_items(proposed_dir, kind="proposed")),
    )
|
|
157
|
+
|
|
158
|
+
|
|
159
|
+
def compute_duplicates(
    *,
    report: ReportRecord,
    all_reports: List[ReportRecord],
    backlog_planned: List[BacklogItem],
    backlog_completed: List[BacklogItem],
    k: int = 5,
) -> List[SimilarityCandidate]:
    """Rank likely duplicates of *report* among other reports and backlog items.

    Other reports need a higher similarity score (0.30) than backlog items
    (0.25) to qualify. Matches from all pools are merged, sorted by score
    (stable sort keeps pool order on ties), and truncated to *k* entries.
    """
    query = report.to_similarity_text()

    others = [r for r in all_reports if r.path != report.path]
    pools = [
        (
            "report",
            0.30,
            [(str(r.path), r.to_similarity_text()) for r in others],
            {str(r.path): r.header.title for r in others},
        ),
        (
            "backlog_planned",
            0.25,
            [(str(i.path), i.to_similarity_text()) for i in backlog_planned],
            {str(i.path): i.title for i in backlog_planned},
        ),
        (
            "backlog_completed",
            0.25,
            [(str(i.path), i.to_similarity_text()) for i in backlog_completed],
            {str(i.path): i.title for i in backlog_completed},
        ),
    ]

    merged: List[SimilarityCandidate] = []
    for kind, min_score, candidates, titles in pools:
        for ref, score in top_k_similar(query_text=query, candidates=candidates, k=k, min_score=min_score):
            merged.append(SimilarityCandidate(kind=kind, ref=ref, score=score, title=titles.get(ref, "")))

    merged.sort(key=lambda c: c.score, reverse=True)
    return merged[: max(0, int(k))]
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
def triage_reports(
    *,
    gateway_data_dir: Path,
    repo_root: Optional[Path],
    write_drafts: bool = False,
    enable_llm: bool = False,
) -> Dict[str, Any]:
    """Scan gateway bug/feature reports and upsert one triage decision per report.

    For each report this computes the unfilled template fields, ranks likely
    duplicates against other reports and the repo backlog, optionally asks an
    LLM for a suggestion, persists the decision into the triage queue, links
    it to an already-proposed backlog item referencing the report, and — when
    *write_drafts* is set and a repo root with docs/backlog exists — writes a
    proposed backlog draft at most once per decision.

    Returns a summary dict (paths and counts), not the decision objects.
    """
    gw_dir = Path(gateway_data_dir).expanduser().resolve()
    reports = scan_gateway_reports(gateway_data_dir=gw_dir)

    # When no repo root is supplied, try to discover one from the CWD.
    resolved_repo_root = repo_root
    if resolved_repo_root is None:
        resolved_repo_root = find_repo_root(Path.cwd())

    backlog_planned: List[BacklogItem] = []
    backlog_completed: List[BacklogItem] = []
    backlog_proposed: List[BacklogItem] = []
    if resolved_repo_root is not None:
        backlog_planned, backlog_completed, backlog_proposed = scan_backlog(resolved_repo_root)

    # Index proposed backlog items by the report they originated from; when
    # several reference the same report, keep the highest item_id (newest).
    proposed_by_report_relpath: Dict[str, BacklogItem] = {}
    proposed_by_report_id: Dict[str, BacklogItem] = {}
    for item in backlog_proposed:
        rel = str(getattr(item, "source_report_relpath", "") or "").strip()
        if rel:
            prev = proposed_by_report_relpath.get(rel)
            if prev is None or int(getattr(item, "item_id", 0)) > int(getattr(prev, "item_id", 0)):
                proposed_by_report_relpath[rel] = item
        rid = str(getattr(item, "source_report_id", "") or "").strip()
        if rid:
            prev = proposed_by_report_id.get(rid)
            if prev is None or int(getattr(item, "item_id", 0)) > int(getattr(prev, "item_id", 0)):
                proposed_by_report_id[rid] = item

    qdir = decisions_dir(gateway_data_dir=gw_dir)

    llm_cfg = load_llm_assist_config()
    # Either the explicit flag or the config file can enable LLM assistance.
    llm_enabled = bool(enable_llm) or bool(llm_cfg.get("enabled"))

    backlog_root = (resolved_repo_root / "docs" / "backlog") if resolved_repo_root else None
    allocator = BacklogIdAllocator.from_backlog_root(backlog_root) if (write_drafts and backlog_root) else None

    updated: List[TriageDecision] = []
    wrote_drafts: List[str] = []

    for report in reports:
        # Prefer a gateway-relative path; fall back to the absolute path.
        try:
            relpath = str(report.path.relative_to(gw_dir))
        except Exception:
            relpath = str(report.path)

        did = decision_id_for_report(report_relpath=relpath)
        missing = compute_missing_fields(report)
        dups = compute_duplicates(
            report=report,
            all_reports=reports,
            backlog_planned=backlog_planned,
            backlog_completed=backlog_completed,
            k=5,
        )

        llm_suggestion = None
        if llm_enabled:
            normalized = {
                "report_type": report.report_type,
                "title": report.header.title,
                "description": report.description,
                "session_id": report.header.session_id,
                "relevant_run_id": report.header.active_run_id,
                "workflow_id": report.header.workflow_id,
                "client": report.header.client,
                "provider": report.header.provider,
                "model": report.header.model,
                "missing_fields": list(missing),
                "duplicates": [asdict(d) for d in dups],
            }
            # Best-effort: an LLM failure leaves llm_suggestion as None.
            suggestion, _err = llm_assist(
                normalized_input=normalized,
                base_url=str(llm_cfg.get("base_url") or ""),
                model=str(llm_cfg.get("model") or ""),
                api_key=str(llm_cfg.get("api_key") or ""),
                temperature=float(llm_cfg.get("temperature") or 0.2),
                timeout_s=float(llm_cfg.get("timeout_s") or 30.0),
                max_tokens=int(llm_cfg.get("max_tokens") or 800),
            )
            if suggestion is not None:
                llm_suggestion = suggestion

        decision = upsert_decision(
            dir_path=qdir,
            decision_id=did,
            report_type=report.report_type,
            report_relpath=relpath,
            missing_fields=missing,
            duplicates=dups,
            llm_suggestion=llm_suggestion,
        )

        # Link to an existing proposed backlog item (auto-bridge or manual), so triage never generates duplicates.
        if resolved_repo_root is not None and not decision.draft_relpath:
            linked: Optional[BacklogItem] = None
            if relpath in proposed_by_report_relpath:
                linked = proposed_by_report_relpath.get(relpath)
            elif report.header.report_id and report.header.report_id in proposed_by_report_id:
                linked = proposed_by_report_id.get(report.header.report_id)
            if linked is not None:
                try:
                    decision.draft_relpath = str(linked.path.relative_to(resolved_repo_root))
                except Exception:
                    decision.draft_relpath = str(linked.path)
                save_decision(dir_path=qdir, decision=decision)

        # Skip draft creation if repo/backlog is unavailable.
        if write_drafts and allocator is not None and resolved_repo_root is not None and backlog_root is not None:
            # Only write once; do not overwrite manual edits.
            if not decision.draft_relpath:
                # NOTE(review): assumes write_backlog_draft records the draft
                # path on `decision` so the save below persists it — confirm.
                draft_path, _new_id = write_backlog_draft(
                    repo_root=resolved_repo_root,
                    backlog_root=backlog_root,
                    allocator=allocator,
                    report=report,
                    decision=decision,
                    llm_suggestion=llm_suggestion,
                )
                save_decision(dir_path=qdir, decision=decision)
                wrote_drafts.append(str(draft_path))

        updated.append(decision)

    return {
        "gateway_data_dir": str(gw_dir),
        "repo_root": str(resolved_repo_root) if resolved_repo_root else "",
        "reports": len(reports),
        "decisions_dir": str(qdir),
        "updated_decisions": len(updated),
        "drafts_written": wrote_drafts,
    }
|
|
341
|
+
|
|
342
|
+
|
|
343
|
+
def apply_decision_action(
    *,
    gateway_data_dir: Path,
    decision_id: str,
    action: str,
    repo_root: Optional[Path] = None,
    write_draft_on_approve: bool = True,
    defer_days: Optional[int] = None,
) -> Tuple[Optional[TriageDecision], Optional[str]]:
    """Apply an approve/reject/defer action to a stored triage decision.

    Returns (decision, error_message); error_message is None on success and
    the decision is None only when it cannot be loaded or the action is
    unknown. Approve may write a backlog draft and elevates a proposed item
    to planned/; reject moves a proposed item to deprecated/; defer stamps
    defer_until using *defer_days* or the ABSTRACT_TRIAGE_DEFER_DAYS env var
    (minimum 1 day).
    """
    gw_dir = Path(gateway_data_dir).expanduser().resolve()
    qdir = decisions_dir(gateway_data_dir=gw_dir)
    decision = load_decision(dir_path=qdir, decision_id=str(decision_id))
    if decision is None:
        return None, "Decision not found"

    act = str(action or "").strip().lower()
    if act not in {"approve", "approved", "reject", "rejected", "defer", "deferred"}:
        return None, "Unsupported action"

    now = _now_utc_iso()
    if act in {"approve", "approved"}:
        # Persist the status change first; file-system work below is best-effort.
        decision.status = "approved"
        decision.defer_until = ""
        decision.updated_at = now
        save_decision(dir_path=qdir, decision=decision)

        resolved_repo_root = repo_root or find_repo_root(Path.cwd())
        if resolved_repo_root is None:
            return decision, None
        backlog_root = (resolved_repo_root / "docs" / "backlog").resolve()
        planned_dir = (backlog_root / "planned").resolve()

        if write_draft_on_approve and not decision.draft_relpath:
            allocator = BacklogIdAllocator.from_backlog_root(backlog_root)

            # Load report content (needed for proposed backlog creation).
            report_path = gw_dir / decision.report_relpath
            try:
                report = parse_report_file(report_path)
            except Exception:
                # Without a readable report we cannot draft; approval still stands.
                return decision, None

            suggestion = decision.llm_suggestion if isinstance(decision.llm_suggestion, dict) else None
            _draft_path, _ = write_backlog_draft(
                repo_root=resolved_repo_root,
                backlog_root=backlog_root,
                allocator=allocator,
                report=report,
                decision=decision,
                llm_suggestion=suggestion,
            )
            decision.updated_at = _now_utc_iso()
            save_decision(dir_path=qdir, decision=decision)

        # If a proposed backlog exists, treat approval as elevation to planned.
        if decision.draft_relpath:
            # Path-containment check: refuse drafts pointing outside the backlog tree.
            src = (resolved_repo_root / decision.draft_relpath).resolve()
            try:
                src.relative_to(backlog_root)
            except Exception:
                return decision, None

            # Only elevate from proposed -> planned (avoid moving already planned/completed items).
            try:
                rel = src.relative_to(backlog_root)
            except Exception:
                rel = None
            if rel and rel.parts and rel.parts[0] == "proposed":
                planned_dir.mkdir(parents=True, exist_ok=True)
                dest = (planned_dir / src.name).resolve()
                try:
                    dest.relative_to(planned_dir)
                except Exception:
                    return decision, "Invalid planned path"
                if dest.exists():
                    # Already elevated (or name collision); do not error.
                    try:
                        decision.draft_relpath = str(dest.relative_to(resolved_repo_root))
                        decision.updated_at = _now_utc_iso()
                        save_decision(dir_path=qdir, decision=decision)
                    except Exception:
                        pass
                    return decision, None
                try:
                    src.rename(dest)
                except Exception as e:
                    return decision, f"Failed to elevate backlog item: {e}"
                try:
                    decision.draft_relpath = str(dest.relative_to(resolved_repo_root))
                except Exception:
                    decision.draft_relpath = str(dest)
                decision.updated_at = _now_utc_iso()
                save_decision(dir_path=qdir, decision=decision)
        return decision, None

    if act in {"reject", "rejected"}:
        decision.status = "rejected"
        decision.defer_until = ""
        decision.updated_at = now
        save_decision(dir_path=qdir, decision=decision)
        resolved_repo_root = repo_root or find_repo_root(Path.cwd())
        if resolved_repo_root is None or not decision.draft_relpath:
            return decision, None

        backlog_root = (resolved_repo_root / "docs" / "backlog").resolve()
        deprecated_dir = (backlog_root / "deprecated").resolve()

        # Path-containment check mirrors the approve branch.
        src = (resolved_repo_root / decision.draft_relpath).resolve()
        try:
            src.relative_to(backlog_root)
        except Exception:
            return decision, None

        # Only move from proposed -> deprecated (avoid touching planned/completed).
        try:
            rel = src.relative_to(backlog_root)
        except Exception:
            rel = None
        if rel and rel.parts and rel.parts[0] == "proposed":
            deprecated_dir.mkdir(parents=True, exist_ok=True)
            dest = (deprecated_dir / src.name).resolve()
            try:
                dest.relative_to(deprecated_dir)
            except Exception:
                return decision, "Invalid deprecated path"
            if dest.exists():
                # Already deprecated (or name collision); do not error.
                try:
                    decision.draft_relpath = str(dest.relative_to(resolved_repo_root))
                    decision.updated_at = _now_utc_iso()
                    save_decision(dir_path=qdir, decision=decision)
                except Exception:
                    pass
                return decision, None
            try:
                src.rename(dest)
            except Exception as e:
                return decision, f"Failed to deprecate backlog item: {e}"
            try:
                decision.draft_relpath = str(dest.relative_to(resolved_repo_root))
            except Exception:
                decision.draft_relpath = str(dest)
            decision.updated_at = _now_utc_iso()
            save_decision(dir_path=qdir, decision=decision)
        return decision, None

    # defer
    days = None
    if defer_days is not None:
        try:
            days = max(1, int(defer_days))
        except Exception:
            days = None
    if days is None:
        # Defer duration is provided through env for v0 (e.g. action=defer and ABSTRACT_TRIAGE_DEFER_DAYS=7).
        days_raw = _env("ABSTRACT_TRIAGE_DEFER_DAYS", "ABSTRACTGATEWAY_TRIAGE_DEFER_DAYS") or "1"
        try:
            days = max(1, int(days_raw))
        except Exception:
            days = 1
    until = (datetime.datetime.now(datetime.timezone.utc) + datetime.timedelta(days=days)).isoformat()
    decision.status = "deferred"
    decision.defer_until = until
    decision.updated_at = now
    save_decision(dir_path=qdir, decision=decision)
    return decision, None
|
@@ -0,0 +1,142 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import datetime
|
|
4
|
+
import hashlib
|
|
5
|
+
import json
|
|
6
|
+
from dataclasses import asdict
|
|
7
|
+
from pathlib import Path
|
|
8
|
+
from typing import Any, Dict, List, Optional
|
|
9
|
+
|
|
10
|
+
from .report_models import SimilarityCandidate, TriageDecision
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def _now_utc_iso() -> str:
|
|
14
|
+
return datetime.datetime.now(datetime.timezone.utc).isoformat()
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
def decision_id_for_report(*, report_relpath: str) -> str:
    """Derive a 16-hex-char id from the report path.

    The id is a SHA-1 prefix, so repeated triage runs for the same report
    always update the same decision entry.
    """
    digest = hashlib.sha1(str(report_relpath).encode("utf-8")).hexdigest()
    return digest[:16]
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def decisions_dir(*, gateway_data_dir: Path) -> Path:
    """Return the triage_queue directory under the gateway data dir, creating it if needed."""
    target = Path(gateway_data_dir).expanduser().resolve() / "triage_queue"
    target.mkdir(parents=True, exist_ok=True)
    return target
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def _decision_path(*, dir_path: Path, decision_id: str) -> Path:
|
|
30
|
+
safe = "".join([c for c in str(decision_id or "") if c.isalnum() or c in {"_", "-"}]).strip()
|
|
31
|
+
safe = safe or "decision"
|
|
32
|
+
return Path(dir_path) / f"{safe}.json"
|
|
33
|
+
|
|
34
|
+
|
|
35
|
+
def load_decision(*, dir_path: Path, decision_id: str) -> Optional[TriageDecision]:
    """Read a persisted decision JSON; return None when absent or unreadable."""
    path = _decision_path(dir_path=dir_path, decision_id=decision_id)
    if not path.exists():
        return None
    try:
        data = json.loads(path.read_text(encoding="utf-8"))
    except Exception:
        return None
    if not isinstance(data, dict):
        return None

    # Rehydrate similarity candidates defensively: malformed entries are dropped.
    try:
        dups: List[SimilarityCandidate] = []
        raw_dups = data.get("duplicates") or []
        if isinstance(raw_dups, list):
            for entry in raw_dups:
                if not isinstance(entry, dict):
                    continue
                ref = str(entry.get("ref") or "").strip()
                if not ref:
                    continue
                try:
                    score = float(entry.get("score"))
                except Exception:
                    score = 0.0
                dups.append(
                    SimilarityCandidate(
                        kind=str(entry.get("kind") or "").strip() or "report",
                        ref=ref,
                        score=score,
                        title=str(entry.get("title") or "").strip(),
                    )
                )
    except Exception:
        dups = []

    raw_missing = data.get("missing_fields") or []
    missing = (
        [str(m).strip() for m in raw_missing if isinstance(m, str) and m.strip()]
        if isinstance(raw_missing, list)
        else []
    )

    llm_raw = data.get("llm_suggestion")
    return TriageDecision(
        decision_id=str(data.get("decision_id") or decision_id),
        report_type=str(data.get("report_type") or "bug"),  # type: ignore[arg-type]
        report_relpath=str(data.get("report_relpath") or ""),
        status=str(data.get("status") or "pending"),  # type: ignore[arg-type]
        created_at=str(data.get("created_at") or ""),
        updated_at=str(data.get("updated_at") or ""),
        defer_until=str(data.get("defer_until") or ""),
        missing_fields=missing,
        duplicates=dups,
        draft_relpath=str(data.get("draft_relpath") or ""),
        llm_suggestion=llm_raw if isinstance(llm_raw, dict) else {},
    )
|
|
83
|
+
|
|
84
|
+
|
|
85
|
+
def save_decision(*, dir_path: Path, decision: TriageDecision) -> Path:
    """Serialize *decision* to its JSON file, filling in missing timestamps."""
    path = _decision_path(dir_path=dir_path, decision_id=decision.decision_id)
    payload: Dict[str, Any] = asdict(decision)
    # Serialize the nested candidate dataclasses explicitly as plain dicts.
    payload["duplicates"] = [asdict(d) for d in decision.duplicates]
    stamp = payload.get("updated_at") or _now_utc_iso()
    payload["updated_at"] = stamp
    if not payload.get("created_at"):
        payload["created_at"] = stamp
    text = json.dumps(payload, ensure_ascii=False, indent=2, sort_keys=True) + "\n"
    path.write_text(text, encoding="utf-8")
    return path
|
|
96
|
+
|
|
97
|
+
|
|
98
|
+
def upsert_decision(
    *,
    dir_path: Path,
    decision_id: str,
    report_type: str,
    report_relpath: str,
    missing_fields: List[str],
    duplicates: List[SimilarityCandidate],
    llm_suggestion: Optional[Dict[str, Any]] = None,
) -> TriageDecision:
    """Create or refresh the persisted decision for a report.

    Manually-set state (status, defer_until, draft_relpath) survives re-runs;
    the computed fields (missing_fields, duplicates, llm_suggestion) are
    refreshed on every call.
    """
    now = _now_utc_iso()
    decision = load_decision(dir_path=dir_path, decision_id=decision_id)
    if decision is None:
        decision = TriageDecision(
            decision_id=decision_id,
            report_type=report_type,  # type: ignore[arg-type]
            report_relpath=report_relpath,
            status="pending",
            created_at=now,
            updated_at=now,
        )

    decision.missing_fields = list(missing_fields)
    decision.duplicates = list(duplicates)
    decision.updated_at = now
    if llm_suggestion is not None:
        decision.llm_suggestion = dict(llm_suggestion)

    save_decision(dir_path=dir_path, decision=decision)
    return decision
|
|
130
|
+
|
|
131
|
+
|
|
132
|
+
def iter_decisions(dir_path: Path) -> List[TriageDecision]:
    """Load every readable decision JSON in *dir_path*, sorted by filename."""
    if not dir_path.exists():
        return []
    loaded = (
        load_decision(dir_path=dir_path, decision_id=p.stem)
        for p in sorted(dir_path.glob("*.json"))
    )
    return [d for d in loaded if d is not None]
|
|
142
|
+
|