abstractgateway 0.1.0__py3-none-any.whl → 0.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- abstractgateway/__init__.py +1 -2
- abstractgateway/__main__.py +7 -0
- abstractgateway/app.py +4 -4
- abstractgateway/cli.py +568 -8
- abstractgateway/config.py +15 -5
- abstractgateway/embeddings_config.py +45 -0
- abstractgateway/host_metrics.py +274 -0
- abstractgateway/hosts/bundle_host.py +528 -55
- abstractgateway/hosts/visualflow_host.py +30 -3
- abstractgateway/integrations/__init__.py +2 -0
- abstractgateway/integrations/email_bridge.py +782 -0
- abstractgateway/integrations/telegram_bridge.py +534 -0
- abstractgateway/maintenance/__init__.py +5 -0
- abstractgateway/maintenance/action_tokens.py +100 -0
- abstractgateway/maintenance/backlog_exec_runner.py +1592 -0
- abstractgateway/maintenance/backlog_parser.py +184 -0
- abstractgateway/maintenance/draft_generator.py +451 -0
- abstractgateway/maintenance/llm_assist.py +212 -0
- abstractgateway/maintenance/notifier.py +109 -0
- abstractgateway/maintenance/process_manager.py +1064 -0
- abstractgateway/maintenance/report_models.py +81 -0
- abstractgateway/maintenance/report_parser.py +219 -0
- abstractgateway/maintenance/text_similarity.py +123 -0
- abstractgateway/maintenance/triage.py +507 -0
- abstractgateway/maintenance/triage_queue.py +142 -0
- abstractgateway/migrate.py +155 -0
- abstractgateway/routes/__init__.py +2 -2
- abstractgateway/routes/gateway.py +10817 -179
- abstractgateway/routes/triage.py +118 -0
- abstractgateway/runner.py +689 -14
- abstractgateway/security/gateway_security.py +425 -110
- abstractgateway/service.py +213 -6
- abstractgateway/stores.py +64 -4
- abstractgateway/workflow_deprecations.py +225 -0
- abstractgateway-0.1.1.dist-info/METADATA +135 -0
- abstractgateway-0.1.1.dist-info/RECORD +40 -0
- abstractgateway-0.1.0.dist-info/METADATA +0 -101
- abstractgateway-0.1.0.dist-info/RECORD +0 -18
- {abstractgateway-0.1.0.dist-info → abstractgateway-0.1.1.dist-info}/WHEEL +0 -0
- {abstractgateway-0.1.0.dist-info → abstractgateway-0.1.1.dist-info}/entry_points.txt +0 -0
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import re
|
|
4
|
+
from dataclasses import dataclass
|
|
5
|
+
from pathlib import Path
|
|
6
|
+
from typing import Iterable, List, Optional, Tuple
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
# Markdown-structure patterns for backlog item files.
# H1 header of the form "# 123-package: Title".
_H1_RE = re.compile(r"^#\s+(?P<id>\d+)-(?P<pkg>[^:]+)\s*:\s*(?P<title>.+?)\s*$")
# H2 section header, e.g. "## Summary".
_H2_RE = re.compile(r"^##\s+(?P<name>.+?)\s*$")
# Blockquote metadata line: "> type: bug|feature|task".
_META_TYPE_RE = re.compile(r"^>\s*type\s*:\s*(?P<type>[a-zA-Z0-9_-]+)\s*$", re.IGNORECASE)
# Optional title prefix tag: "[bug] ...", "[feature] ...", "[task] ...".
_TITLE_TYPE_RE = re.compile(r"^\[(?P<type>bug|feature|task)\]\s*(?P<rest>.*)$", re.IGNORECASE)
# Blockquote metadata pointing back at the originating report.
_META_SOURCE_REPORT_RELPATH_RE = re.compile(r"^>\s*source\s+report\s+relpath\s*:\s*(?P<relpath>.+?)\s*$", re.IGNORECASE)
_META_SOURCE_REPORT_ID_RE = re.compile(r"^>\s*source\s+report\s+id\s*:\s*(?P<id>.+?)\s*$", re.IGNORECASE)
# Fallback for legacy drafts: a report relpath embedded anywhere in the text.
_INFER_SOURCE_REPORT_RELPATH_RE = re.compile(r"(?P<relpath>(?:bug_reports|feature_requests)/[A-Za-z0-9._-]{1,220}\.md)")
|
|
16
|
+
|
|
17
|
+
|
|
18
|
+
@dataclass(frozen=True)
class BacklogItem:
    """An immutable, parsed backlog markdown item."""

    kind: str  # planned|completed|proposed
    path: Path
    item_id: int
    package: str
    title: str
    task_type: str = "task"  # bug|feature|task
    summary: str = ""
    source_report_relpath: str = ""
    source_report_id: str = ""

    def ref(self) -> str:
        """Return a stable string reference for this item (its file path)."""
        return str(self.path)

    def to_similarity_text(self) -> str:
        """Return the text blob used when similarity-matching this item."""
        parts = [f"{self.package}: {self.title}"]
        if self.summary:
            parts.append(self.summary)
        return "\n\n".join(p for p in parts if p).strip()
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
def _parse_summary(text: str) -> str:
    """Extract the body of the first "## Summary" section (empty if absent).

    Collection stops at the next H2 header after the Summary section.
    """
    collected: List[str] = []
    capturing = False
    for raw in text.splitlines():
        header = _H2_RE.match(raw.strip())
        if header:
            if str(header.group("name") or "").strip().lower() == "summary":
                capturing = True
                collected = []
                continue
            if capturing:
                break
        if capturing:
            collected.append(raw)
    return "\n".join(collected).strip()
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
def _normalize_task_type(raw: str) -> Optional[str]:
|
|
61
|
+
t = str(raw or "").strip().lower()
|
|
62
|
+
if t in {"bug", "feature", "task"}:
|
|
63
|
+
return t
|
|
64
|
+
return None
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
def _parse_task_type(text: str, title: str) -> Tuple[str, str]:
    """Return (task_type, normalized_title).

    Type resolution order: explicit "> type:" metadata line (first 80 lines),
    then a "[bug]/[feature]/[task]" title prefix, then a best-effort inference
    from legacy report-path mentions; defaults to "task".
    """
    # 1) Look for a "> type: ..." metadata line near the top of the file.
    meta_type: Optional[str] = None
    for raw in text.splitlines()[:80]:
        line = raw.strip()
        if not line:
            continue
        m = _META_TYPE_RE.match(line)
        if not m:
            continue
        meta_type = _normalize_task_type(m.group("type"))
        if meta_type:
            break

    # 2) A "[bug]" style prefix on the title both hints the type (when no
    # metadata type was found) and is stripped from the displayed title.
    title_str = str(title or "").strip()
    m2 = _TITLE_TYPE_RE.match(title_str)
    if m2:
        prefix_type = _normalize_task_type(m2.group("type"))
        rest = str(m2.group("rest") or "").strip()
        if not meta_type and prefix_type:
            meta_type = prefix_type
        # NOTE(review): reconstructed placement — prefix is stripped whenever
        # it matches, regardless of whether metadata supplied the type; confirm
        # against the original formatting.
        title_str = rest or title_str

    # Best-effort inference for legacy items (before typed backlog was introduced).
    if not meta_type:
        lowered = text.lower()
        if "bug_reports/" in lowered:
            meta_type = "bug"
        elif "feature_requests/" in lowered:
            meta_type = "feature"

    return (meta_type or "task"), title_str
|
|
99
|
+
|
|
100
|
+
|
|
101
|
+
def parse_backlog_item(path: Path, *, kind: str) -> Optional[BacklogItem]:
    """Parse one backlog markdown file into a BacklogItem.

    Returns None when the file cannot be read, has no header line starting
    with "#", or the header does not match the "# NNN-package: title" shape.
    """
    try:
        text = path.read_text(encoding="utf-8", errors="replace")
    except Exception:
        # Unreadable files are silently skipped by callers.
        return None

    # Find the first non-empty line that looks like a markdown header.
    first_h1 = ""
    for raw in text.splitlines():
        line = raw.strip()
        if not line:
            continue
        if line.startswith("#"):
            first_h1 = line
            break

    m = _H1_RE.match(first_h1)
    if not m:
        return None

    try:
        item_id = int(m.group("id"))
    except Exception:
        return None

    pkg = str(m.group("pkg") or "").strip().lower()
    raw_title = str(m.group("title") or "").strip()
    task_type, title = _parse_task_type(text, raw_title)
    summary = _parse_summary(text)

    # Scan the top of the file for "> source report ..." metadata lines.
    source_report_relpath = ""
    source_report_id = ""
    lines = text.splitlines()
    for raw in lines[:120]:
        line = raw.strip()
        if not line or not line.startswith(">"):
            continue
        m_sr = _META_SOURCE_REPORT_RELPATH_RE.match(line)
        if m_sr:
            source_report_relpath = str(m_sr.group("relpath") or "").strip()
            continue
        m_id = _META_SOURCE_REPORT_ID_RE.match(line)
        if m_id:
            source_report_id = str(m_id.group("id") or "").strip()
            continue

    if not source_report_relpath:
        # Best-effort legacy inference: older drafts stored an absolute path in "Source report",
        # which still contains the stable folder+filename segment.
        hay = "\n".join(lines[:300])
        m_inf = _INFER_SOURCE_REPORT_RELPATH_RE.search(hay)
        if m_inf:
            source_report_relpath = str(m_inf.group("relpath") or "").strip()

    return BacklogItem(
        kind=kind,
        path=path,
        item_id=item_id,
        package=pkg,
        title=title,
        task_type=task_type,
        summary=summary,
        source_report_relpath=source_report_relpath,
        source_report_id=source_report_id,
    )
|
|
165
|
+
|
|
166
|
+
|
|
167
|
+
def iter_backlog_items(dir_path: Path, *, kind: str) -> Iterable[BacklogItem]:
    """Parse every *.md file directly under dir_path, skipping unparseable ones.

    Returns an empty list when the directory is missing.
    """
    if not dir_path.exists() or not dir_path.is_dir():
        return []
    parsed = (parse_backlog_item(p, kind=kind) for p in sorted(dir_path.glob("*.md")))
    return [item for item in parsed if item is not None]
|
|
176
|
+
|
|
177
|
+
|
|
178
|
+
def max_backlog_id(dirs: List[Tuple[Path, str]]) -> int:
    """Return the highest item_id across all (directory, kind) pairs (0 if none)."""
    best = 0
    for directory, directory_kind in dirs:
        for entry in iter_backlog_items(directory, kind=directory_kind):
            best = max(best, entry.item_id)
    return best
|
@@ -0,0 +1,451 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import datetime
|
|
4
|
+
import re
|
|
5
|
+
from dataclasses import dataclass
|
|
6
|
+
from pathlib import Path
|
|
7
|
+
from typing import Any, Dict, List, Optional, Tuple
|
|
8
|
+
|
|
9
|
+
from .backlog_parser import max_backlog_id
|
|
10
|
+
from .report_models import ReportRecord, TriageDecision
|
|
11
|
+
from .text_similarity import similarity, tokenize
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
def _now_local_timestamp() -> str:
|
|
15
|
+
# Keep local time to match existing backlog conventions.
|
|
16
|
+
return datetime.datetime.now().astimezone().strftime("%Y-%m-%d %H:%M:%S %z")
|
|
17
|
+
|
|
18
|
+
|
|
19
|
+
def _slug(text: str) -> str:
|
|
20
|
+
s = str(text or "").strip().lower()
|
|
21
|
+
s = re.sub(r"[^a-z0-9]+", "-", s).strip("-")
|
|
22
|
+
s = s[:80].strip("-") or "item"
|
|
23
|
+
return s
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _guess_package(report: ReportRecord) -> str:
|
|
27
|
+
client = str(report.header.client or "").strip().lower()
|
|
28
|
+
if "abstractcode" in client:
|
|
29
|
+
return "abstractcode"
|
|
30
|
+
if "abstractgateway" in client or "gateway" in client:
|
|
31
|
+
return "abstractgateway"
|
|
32
|
+
if "abstractruntime" in client or "runtime" in client:
|
|
33
|
+
return "abstractruntime"
|
|
34
|
+
# Fallback: cross-cutting.
|
|
35
|
+
return "framework"
|
|
36
|
+
|
|
37
|
+
|
|
38
|
+
@dataclass
class BacklogIdAllocator:
    """Hands out monotonically increasing backlog item ids."""

    next_id: int

    @classmethod
    def from_backlog_root(cls, backlog_root: Path) -> "BacklogIdAllocator":
        """Seed the allocator from the highest id found under backlog_root."""
        subdirs = ("planned", "completed", "proposed", "recurrent", "deprecated", "trash")
        highest = max_backlog_id([(backlog_root / name, name) for name in subdirs])
        return cls(next_id=highest + 1)

    def allocate(self) -> int:
        """Return the current id and advance the counter."""
        allocated = int(self.next_id)
        self.next_id = allocated + 1
        return allocated
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
def _draft_title(report: ReportRecord) -> str:
|
|
69
|
+
def _from_desc(desc: str) -> str:
|
|
70
|
+
raw = str(desc or "").replace("\r", "")
|
|
71
|
+
for line in raw.split("\n"):
|
|
72
|
+
s = line.strip()
|
|
73
|
+
if not s:
|
|
74
|
+
continue
|
|
75
|
+
s2 = re.sub(r"^\s*/(bug|feature)\b\s*", "", s, flags=re.IGNORECASE).strip()
|
|
76
|
+
s2 = re.sub(r"\s+", " ", s2).strip()
|
|
77
|
+
return s2 or s
|
|
78
|
+
return ""
|
|
79
|
+
|
|
80
|
+
base = report.header.title.strip() if report.header.title else ""
|
|
81
|
+
desc_line = _from_desc(report.description)
|
|
82
|
+
|
|
83
|
+
# Prefer the report's first-line description when it looks like the stored title
|
|
84
|
+
# was previously truncated (older gateway versions clamped report titles).
|
|
85
|
+
if desc_line:
|
|
86
|
+
if not base:
|
|
87
|
+
base = desc_line
|
|
88
|
+
elif len(base) >= 100 and len(desc_line) > len(base) and desc_line.lower().startswith(base.lower()):
|
|
89
|
+
base = desc_line
|
|
90
|
+
|
|
91
|
+
if base:
|
|
92
|
+
base = re.sub(r"\s+", " ", base).strip()
|
|
93
|
+
if len(base) > 120:
|
|
94
|
+
base = base[:120].rstrip()
|
|
95
|
+
return base
|
|
96
|
+
return "Bug fix" if report.report_type == "bug" else "Feature request"
|
|
97
|
+
|
|
98
|
+
|
|
99
|
+
def _draft_summary(report: ReportRecord) -> str:
|
|
100
|
+
# In proposed backlog items, preserve the user's report description verbatim
|
|
101
|
+
# (never truncate). This is the "source of truth" for what was requested.
|
|
102
|
+
desc = str(report.description or "")
|
|
103
|
+
if desc.strip():
|
|
104
|
+
lines = desc.replace("\r\n", "\n").replace("\r", "\n").split("\n")
|
|
105
|
+
return "\n".join([f" {ln}" for ln in lines]).rstrip()
|
|
106
|
+
|
|
107
|
+
title = report.header.title.strip() if report.header.title else ""
|
|
108
|
+
if report.report_type == "bug":
|
|
109
|
+
t = title or "bug"
|
|
110
|
+
return (
|
|
111
|
+
f"Fix reported issue: {t}. Use the linked report/session/run artifacts to reproduce, identify root cause, and implement a minimal fix "
|
|
112
|
+
"with targeted tests (ADR-0019)."
|
|
113
|
+
)
|
|
114
|
+
t = title or "feature request"
|
|
115
|
+
return (
|
|
116
|
+
f"Implement requested feature: {t}. Use the linked report/session/run artifacts to capture context, define clear acceptance criteria, "
|
|
117
|
+
"and deliver a scoped change with minimal dependencies."
|
|
118
|
+
)
|
|
119
|
+
|
|
120
|
+
|
|
121
|
+
def _read_text_bounded(path: Path, *, max_chars: int = 80_000) -> str:
|
|
122
|
+
try:
|
|
123
|
+
text = path.read_text(encoding="utf-8", errors="replace")
|
|
124
|
+
except Exception:
|
|
125
|
+
return ""
|
|
126
|
+
if len(text) > int(max_chars):
|
|
127
|
+
return text[: int(max_chars)]
|
|
128
|
+
return text
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
# Keyword -> additive similarity boost. A boost is applied when the keyword
# appears in BOTH the query and the candidate token sets (see _boosted_similarity,
# which caps the total boost at 0.35).
_KEYWORD_BOOSTS: Dict[str, float] = {
    # UX + theming.
    "theme": 0.22,
    "themes": 0.22,
    "ui": 0.08,
    "ux": 0.08,
    # Durability / replay / observability.
    "replay": 0.18,
    "ledger": 0.18,
    "history": 0.10,
    "tool": 0.14,
    "tools": 0.14,
    "attachment": 0.14,
    "attachments": 0.14,
    # Security / auth.
    "auth": 0.14,
    "security": 0.14,
    # Process.
    "backlog": 0.10,
    "triage": 0.10,
}
|
|
152
|
+
|
|
153
|
+
|
|
154
|
+
def _boosted_similarity(*, query_text: str, candidate_text: str) -> float:
    """Similarity with small keyword boosts for higher-signal terms.

    The underlying similarity function is intentionally lightweight and tends
    to produce low raw scores on long documents (ADRs). Keyword boosts help
    surface relevant items without pretending they are definitive matches.
    The result is clamped to 1.0 and the total boost is capped at 0.35.
    """
    raw_score = similarity(query_text, candidate_text)
    shared = set(tokenize(query_text)) & set(tokenize(candidate_text))
    total_boost = sum(weight for keyword, weight in _KEYWORD_BOOSTS.items() if keyword in shared)
    return min(1.0, raw_score + min(total_boost, 0.35))
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
def _related_adrs_markdown(*, repo_root: Optional[Path], report: ReportRecord, k: int = 4) -> str:
    """Return a markdown bullet list of ADRs similar to the report (best effort).

    Scans up to 250 files under docs/adr, scores each against the report with
    _boosted_similarity, and keeps the top-k above a low 0.06 threshold. Always
    returns a non-empty markdown fragment (placeholder bullets on failure).
    """
    if repo_root is None:
        return "- (repo unavailable)"
    rr = Path(repo_root).expanduser().resolve()
    adr_dir = (rr / "docs" / "adr").resolve()
    if not adr_dir.exists():
        return "- (no docs/adr directory)"

    query = report.to_similarity_text()
    candidates: List[Tuple[str, str]] = []
    for p in sorted(adr_dir.glob("*.md")):
        if p.name.lower() == "readme.md":
            continue
        rel = ""
        try:
            # Prefer a repo-relative path; fall back to the absolute path.
            rel = str(p.relative_to(rr))
        except Exception:
            rel = str(p)
        text = _read_text_bounded(p, max_chars=60_000)
        if not text.strip():
            continue
        # Focus the candidate text on the ADR "front matter" to avoid long-body dilution.
        head = "\n".join(text.splitlines()[:120]).strip()
        cand_text = f"{p.name}\n{head}".strip()
        candidates.append((rel, cand_text))
        if len(candidates) >= 250:
            # Hard cap keeps this fast enough for the auto-bridge path.
            break

    if not candidates:
        return "- (none found)"

    # Score, rank, and keep only plausible matches.
    scored: List[Tuple[str, float]] = []
    for ref, text in candidates:
        s = _boosted_similarity(query_text=query, candidate_text=text)
        scored.append((ref, s))
    scored.sort(key=lambda x: x[1], reverse=True)
    top = scored[: max(0, int(k))]
    top = [(ref, s) for ref, s in top if s >= 0.06]
    if not top:
        return "- (none)"
    return "\n".join([f"- ({s:.2f}) `{ref}`" for ref, s in top]).strip()
|
|
213
|
+
|
|
214
|
+
|
|
215
|
+
def _related_backlog_markdown(*, repo_root: Optional[Path], report: ReportRecord, k: int = 8) -> str:
    """Best-effort related backlog pointers (fast enough for auto-bridge).

    We intentionally include both planned (dependencies) and completed (related prior work).
    This is deterministic and does not use an LLM. Always returns a markdown
    fragment; placeholder bullets describe why nothing was matched.
    """
    if repo_root is None:
        return "- (repo unavailable)"
    rr = Path(repo_root).expanduser().resolve()
    planned_dir = (rr / "docs" / "backlog" / "planned").resolve()
    completed_dir = (rr / "docs" / "backlog" / "completed").resolve()
    if not planned_dir.exists() and not completed_dir.exists():
        return "- (no backlog)"

    try:
        # Imported lazily so a missing/broken parser degrades to a placeholder.
        from .backlog_parser import iter_backlog_items  # type: ignore
    except Exception:
        return "- (backlog parser unavailable)"

    planned_items = list(iter_backlog_items(planned_dir, kind="planned")) if planned_dir.exists() else []
    completed_items = list(iter_backlog_items(completed_dir, kind="completed")) if completed_dir.exists() else []
    items = planned_items + completed_items
    if not items:
        return "- (none)"

    # Score every item (bounded at 1200) against the report text.
    query = report.to_similarity_text()
    scored: List[Tuple[float, str, Any]] = []
    for item in items[:1200]:
        try:
            rel = str(item.path.relative_to(rr))
        except Exception:
            rel = str(item.path)
        text = item.to_similarity_text()
        score = _boosted_similarity(query_text=query, candidate_text=text)
        scored.append((score, rel, item))

    scored.sort(key=lambda x: x[0], reverse=True)
    # Filter first (>= 0.10), then take the top-k of what remains.
    top = [x for x in scored if x[0] >= 0.10][: max(0, int(k))]
    if not top:
        return "- (none)"

    lines: List[str] = []
    for score, ref, item in top:
        # getattr guards keep this resilient to partially-populated items.
        title = str(getattr(item, "title", "") or "").strip()
        pkg = str(getattr(item, "package", "") or "").strip()
        kind = str(getattr(item, "kind", "") or "").strip()
        label = f"{pkg}: {title}" if pkg and title else title or pkg or ref
        kind_tag = f"{kind}" if kind else "backlog"
        lines.append(f"- ({score:.2f}, {kind_tag}) {label} — `{ref}`")
    return "\n".join(lines).strip()
|
|
265
|
+
|
|
266
|
+
|
|
267
|
+
def _format_related(report: ReportRecord, decision: TriageDecision, *, repo_root: Optional[Path] = None) -> str:
    """Render the report's cross-reference bullets (ids, paths, duplicates).

    Returns a markdown fragment; empty string when nothing is available.
    """
    rel: list[str] = []
    if str(getattr(decision, "report_relpath", "") or "").strip():
        rel.append(f"- Source report relpath: `{decision.report_relpath}`")

    # Prefer a repo-root relative path for docs portability when possible.
    if repo_root is not None:
        try:
            rr = Path(repo_root).expanduser().resolve()
            rp = Path(report.path).expanduser().resolve()
            rel_repo = str(rp.relative_to(rr))
        except Exception:
            # Outside the repo (or unresolvable) — omit the bullet.
            rel_repo = ""
        if rel_repo:
            rel.append(f"- Source report file (repo): `{rel_repo}`")

    if report.header.report_id:
        rel.append(f"- Report ID: `{report.header.report_id}`")
    if report.header.session_id:
        rel.append(f"- Session ID: `{report.header.session_id}`")
    if report.header.active_run_id:
        rel.append(f"- Relevant run ID: `{report.header.active_run_id}`")
    if report.header.session_memory_run_id:
        rel.append(f"- Session memory run ID (attachments): `{report.header.session_memory_run_id}`")
    if decision.duplicates:
        rel.append("- Possible duplicates:")
        for d in decision.duplicates[:5]:
            ref = str(getattr(d, "ref", "") or "").strip()
            title = str(getattr(d, "title", "") or "").strip()
            ref_out = ref
            if repo_root is not None and ref:
                try:
                    # Relativize duplicate refs that live inside the repo.
                    rr = Path(repo_root).expanduser().resolve()
                    rp = Path(ref).expanduser().resolve()
                    if str(rp).startswith(str(rr)):
                        ref_out = str(rp.relative_to(rr))
                except Exception:
                    ref_out = ref
            # NOTE(review): nested-bullet indent reconstructed from a
            # whitespace-mangled source — confirm against repo formatting.
            rel.append(f" - ({d.kind}, {d.score:.2f}) {title or ref_out}")
    return "\n".join(rel).strip()
|
|
307
|
+
|
|
308
|
+
|
|
309
|
+
def generate_backlog_draft_markdown(
    *,
    item_id: int,
    package: str,
    report: ReportRecord,
    decision: TriageDecision,
    llm_suggestion: Optional[Dict[str, Any]] = None,
    repo_root: Optional[Path] = None,
) -> str:
    """Render a complete proposed-backlog markdown document for a report.

    Deterministic template; the optional llm_suggestion dict may override the
    title ("backlog_title"), package ("packages", first comma-separated entry)
    and acceptance criteria ("acceptance_criteria").
    """
    task_type = "bug" if report.report_type == "bug" else "feature"
    type_tag = "BUG" if task_type == "bug" else "FEATURE"
    title = _draft_title(report)
    summary = _draft_summary(report)
    created = _now_local_timestamp()

    # Pull optional LLM overrides (all fall back to deterministic values).
    llm_title = ""
    llm_pkgs = ""
    llm_ac = ""
    if isinstance(llm_suggestion, dict):
        llm_title = str(llm_suggestion.get("backlog_title") or "").strip()
        llm_pkgs = str(llm_suggestion.get("packages") or "").strip()
        llm_ac = str(llm_suggestion.get("acceptance_criteria") or "").strip()
    if llm_title:
        # Same normalization/clamp as _draft_title (<=120 chars).
        title = re.sub(r"\s+", " ", llm_title).strip()
        if len(title) > 120:
            title = title[:120].rstrip()
    if llm_pkgs:
        package = llm_pkgs.split(",", 1)[0].strip().lower() or package

    missing = decision.missing_fields or []
    missing_md = "\n".join([f"- [ ] {m}" for m in missing]) if missing else "- (none)"

    # Acceptance criteria: LLM-provided, else a type-specific checklist.
    suggested_ac = llm_ac.strip()
    if not suggested_ac:
        if report.report_type == "bug":
            suggested_ac = "- [ ] Reproduce the issue and identify root cause\n- [ ] Implement fix with minimal blast radius\n- [ ] Add targeted tests (ADR-0019)\n- [ ] Verify no regressions"
        else:
            suggested_ac = (
                "- [ ] Confirm problem/motivation and proposed solution\n"
                "- [ ] Define clear acceptance criteria (2–5 items)\n"
                "- [ ] Implement the feature with minimal dependencies\n"
                "- [ ] Add/adjust tests per ADR-0019\n"
                "- [ ] Verify UX + durability expectations (replay/session context)"
            )

    related = _format_related(report, decision, repo_root=repo_root)
    related_backlog = _related_backlog_markdown(repo_root=repo_root, report=report, k=6)
    related_adrs = _related_adrs_markdown(repo_root=repo_root, report=report, k=4)

    return (
        f"# {item_id:03d}-{package}: [{type_tag}] {title}\n\n"
        f"> Created: {created}\n"
        f"> Type: {task_type}\n"
        f"> Source report relpath: {decision.report_relpath}\n"
        f"> Source report id: {report.header.report_id or ''}\n\n"
        "## Summary\n"
        f"{summary}\n\n"
        "## Diagram\n"
        "```\n"
        "/bug|/feature -> gateway report -> auto-proposed backlog -> (optional triage) -> planned -> implementation\n"
        "```\n\n"
        "## Context\n"
        f"{related}\n\n"
        "## Related (best-effort)\n"
        "### Potential ADRs\n"
        f"{related_adrs}\n\n"
        "### Potential dependencies / related backlog items\n"
        f"{related_backlog}\n\n"
        "## Scope\n"
        "### Included\n"
        "- Fix/implement the behavior described in the report\n"
        "- Add/adjust tests per ADR-0019\n\n"
        "### Excluded\n"
        "- Unrelated refactors\n"
        "- Broad UX redesigns unless required by the report\n\n"
        "## Missing Info (to confirm)\n"
        f"{missing_md}\n\n"
        "## Implementation Plan\n"
        "1. Reproduce and narrow down the cause.\n"
        "2. Identify the smallest correct fix.\n"
        "3. Add tests (Level A/B as applicable).\n"
        "4. Verify in the relevant client(s).\n\n"
        "## Acceptance Criteria\n"
        f"{suggested_ac}\n\n"
        "## Testing (ADR-0019)\n"
        "- Level A (basic): add targeted unit/contract tests.\n"
        "- Level B (integration): reproduce with file-backed stores or the relevant gateway/client wiring.\n"
        "- Level C (optional): run a real client flow if it requires external infra.\n\n"
        "## Related\n"
        f"- Report inbox + triage: `docs/backlog/planned/644-framework-automated-report-triage-pipeline-v0.md`\n"
        f"- Backlog conventions: `docs/backlog/README.md`\n"
    )
|
|
401
|
+
|
|
402
|
+
|
|
403
|
+
def write_backlog_draft(
    *,
    repo_root: Path,
    backlog_root: Path,
    allocator: BacklogIdAllocator,
    report: ReportRecord,
    decision: TriageDecision,
    llm_suggestion: Optional[Dict[str, Any]] = None,
) -> Tuple[Path, int]:
    """Write a proposed backlog draft file and return (path, item_id).

    Allocates fresh ids until the filename is unused (exclusive create avoids
    races); records the draft's repo-relative path on the decision.
    Raises RuntimeError if 500 consecutive filename collisions occur.
    """
    proposed_dir = backlog_root / "proposed"
    proposed_dir.mkdir(parents=True, exist_ok=True)

    package = _guess_package(report)
    title = str((llm_suggestion or {}).get("backlog_title") or report.header.title or "").strip()
    slug = _slug(title or report.header.title or report.description or "draft")

    last_err: Optional[str] = None
    for _ in range(0, 500):
        item_id = allocator.allocate()
        filename = f"{item_id:03d}-{package}-{slug}.md"
        path = proposed_dir / filename

        # Regenerated per attempt because the rendered header embeds item_id.
        md = generate_backlog_draft_markdown(
            item_id=item_id,
            package=package,
            report=report,
            decision=decision,
            llm_suggestion=llm_suggestion,
            repo_root=repo_root,
        )
        try:
            # "x" mode: fail (rather than overwrite) if the file already exists.
            with open(path, "x", encoding="utf-8") as f:
                f.write(md)
            last_err = None
            break
        except FileExistsError:
            last_err = "Filename collision"
            continue

    if last_err is not None:
        raise RuntimeError(last_err)

    # Store repo-root relative path in the decision.
    try:
        rel = str(path.relative_to(repo_root))
    except Exception:
        rel = str(path)
    decision.draft_relpath = rel
    return path, item_id
|