git-aware-coding-agent 1.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- avos_cli/__init__.py +3 -0
- avos_cli/agents/avos_ask_agent.md +47 -0
- avos_cli/agents/avos_ask_agent_JSON_converter.md +78 -0
- avos_cli/agents/avos_hisotry_agent_JSON_converter.md +92 -0
- avos_cli/agents/avos_history_agent.md +58 -0
- avos_cli/agents/git_diff_agent.md +63 -0
- avos_cli/artifacts/__init__.py +17 -0
- avos_cli/artifacts/base.py +47 -0
- avos_cli/artifacts/commit_builder.py +35 -0
- avos_cli/artifacts/doc_builder.py +30 -0
- avos_cli/artifacts/issue_builder.py +37 -0
- avos_cli/artifacts/pr_builder.py +50 -0
- avos_cli/cli/__init__.py +1 -0
- avos_cli/cli/main.py +504 -0
- avos_cli/commands/__init__.py +1 -0
- avos_cli/commands/ask.py +541 -0
- avos_cli/commands/connect.py +363 -0
- avos_cli/commands/history.py +549 -0
- avos_cli/commands/hook_install.py +260 -0
- avos_cli/commands/hook_sync.py +231 -0
- avos_cli/commands/ingest.py +506 -0
- avos_cli/commands/ingest_pr.py +239 -0
- avos_cli/config/__init__.py +1 -0
- avos_cli/config/hash_store.py +93 -0
- avos_cli/config/lock.py +122 -0
- avos_cli/config/manager.py +180 -0
- avos_cli/config/state.py +90 -0
- avos_cli/exceptions.py +272 -0
- avos_cli/models/__init__.py +58 -0
- avos_cli/models/api.py +75 -0
- avos_cli/models/artifacts.py +99 -0
- avos_cli/models/config.py +56 -0
- avos_cli/models/diff.py +117 -0
- avos_cli/models/query.py +234 -0
- avos_cli/parsers/__init__.py +21 -0
- avos_cli/parsers/artifact_ref_extractor.py +173 -0
- avos_cli/parsers/reference_parser.py +117 -0
- avos_cli/services/__init__.py +1 -0
- avos_cli/services/chronology_service.py +68 -0
- avos_cli/services/citation_validator.py +134 -0
- avos_cli/services/context_budget_service.py +104 -0
- avos_cli/services/diff_resolver.py +398 -0
- avos_cli/services/diff_summary_service.py +141 -0
- avos_cli/services/git_client.py +351 -0
- avos_cli/services/github_client.py +443 -0
- avos_cli/services/llm_client.py +312 -0
- avos_cli/services/memory_client.py +323 -0
- avos_cli/services/query_fallback_formatter.py +108 -0
- avos_cli/services/reply_output_service.py +341 -0
- avos_cli/services/sanitization_service.py +218 -0
- avos_cli/utils/__init__.py +1 -0
- avos_cli/utils/dotenv_load.py +50 -0
- avos_cli/utils/hashing.py +22 -0
- avos_cli/utils/logger.py +77 -0
- avos_cli/utils/output.py +232 -0
- avos_cli/utils/sanitization_diagnostics.py +81 -0
- avos_cli/utils/time_helpers.py +56 -0
- git_aware_coding_agent-1.0.0.dist-info/METADATA +390 -0
- git_aware_coding_agent-1.0.0.dist-info/RECORD +62 -0
- git_aware_coding_agent-1.0.0.dist-info/WHEEL +4 -0
- git_aware_coding_agent-1.0.0.dist-info/entry_points.txt +2 -0
- git_aware_coding_agent-1.0.0.dist-info/licenses/LICENSE +201 -0
avos_cli/commands/ask.py
ADDED
|
@@ -0,0 +1,541 @@
|
|
|
1
|
+
"""Ask command orchestrator for AVOS CLI.
|
|
2
|
+
|
|
3
|
+
Implements the `avos ask "question"` flow: retrieves relevant memory
|
|
4
|
+
artifacts, enriches with git diff summaries, sanitizes, packs within budget,
|
|
5
|
+
synthesizes via LLM, validates citation grounding, and renders answer or
|
|
6
|
+
deterministic fallback.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
from __future__ import annotations
|
|
10
|
+
|
|
11
|
+
import json as json_module
|
|
12
|
+
from pathlib import Path
|
|
13
|
+
from typing import TYPE_CHECKING
|
|
14
|
+
|
|
15
|
+
from avos_cli.config.manager import load_config
|
|
16
|
+
from avos_cli.exceptions import (
|
|
17
|
+
AvosError,
|
|
18
|
+
ConfigurationNotInitializedError,
|
|
19
|
+
LLMSynthesisError,
|
|
20
|
+
)
|
|
21
|
+
from avos_cli.models.api import SearchHit
|
|
22
|
+
from avos_cli.models.diff import DiffStatus
|
|
23
|
+
from avos_cli.models.query import (
|
|
24
|
+
FallbackReason,
|
|
25
|
+
QueryMode,
|
|
26
|
+
RetrievedArtifact,
|
|
27
|
+
SanitizedArtifact,
|
|
28
|
+
SynthesisRequest,
|
|
29
|
+
)
|
|
30
|
+
from avos_cli.parsers import ReferenceParser, extract_refs_by_note
|
|
31
|
+
from avos_cli.services.citation_validator import CitationValidator
|
|
32
|
+
from avos_cli.services.context_budget_service import ContextBudgetService
|
|
33
|
+
from avos_cli.services.diff_resolver import DiffResolver
|
|
34
|
+
from avos_cli.services.diff_summary_service import DiffSummaryService
|
|
35
|
+
from avos_cli.services.llm_client import LLMClient
|
|
36
|
+
from avos_cli.services.memory_client import AvosMemoryClient
|
|
37
|
+
from avos_cli.services.query_fallback_formatter import QueryFallbackFormatter
|
|
38
|
+
from avos_cli.services.reply_output_service import (
|
|
39
|
+
ReplyOutputService,
|
|
40
|
+
dumb_format_ask,
|
|
41
|
+
parse_ask_response,
|
|
42
|
+
)
|
|
43
|
+
from avos_cli.services.sanitization_service import SanitizationService
|
|
44
|
+
from avos_cli.utils.logger import get_logger
|
|
45
|
+
from avos_cli.utils.output import (
|
|
46
|
+
print_error,
|
|
47
|
+
print_info,
|
|
48
|
+
print_json,
|
|
49
|
+
print_warning,
|
|
50
|
+
render_panel,
|
|
51
|
+
render_table,
|
|
52
|
+
)
|
|
53
|
+
from avos_cli.utils.sanitization_diagnostics import explain_sanitization_gate
|
|
54
|
+
|
|
55
|
+
if TYPE_CHECKING:
|
|
56
|
+
from avos_cli.services.github_client import GitHubClient
|
|
57
|
+
|
|
58
|
+
_log = get_logger("commands.ask")
|
|
59
|
+
|
|
60
|
+
# Number of memory artifacts retrieved per ask query (k for the search call).
_ASK_K = 10
# Search mode passed to the memory API's search endpoint.
_ASK_SEARCH_MODE = "semantic"
# Minimum citations required both for budget-pack inclusion and for
# grounding validation; below this the flow falls back to raw evidence.
_MIN_GROUNDED_CITATIONS = 2
# Sanitization confidence (0-100) below which synthesis is skipped and the
# SAFETY_BLOCK fallback path is taken instead.
_SANITIZATION_CONFIDENCE_THRESHOLD = 70
|
|
64
|
+
|
|
65
|
+
|
|
66
|
+
def _build_raw_output(artifacts: list[SanitizedArtifact]) -> str:
|
|
67
|
+
"""Build raw artifact string for reply layer (matches QueryFallbackFormatter format)."""
|
|
68
|
+
lines: list[str] = []
|
|
69
|
+
for art in artifacts:
|
|
70
|
+
lines.append(f"[{art.note_id}] ({art.created_at})\n{art.content}")
|
|
71
|
+
lines.append("---")
|
|
72
|
+
return "\n".join(lines)
|
|
73
|
+
|
|
74
|
+
|
|
75
|
+
def _render_reply_output(
    question: str,
    raw_output: str,
    reply_service: ReplyOutputService | None,
    json_output: bool = False,
    json_merge: dict[str, object] | None = None,
) -> None:
    """Render ask output via reply layer or raw. Used for both success and fallback paths.

    Args:
        question: The user's question.
        raw_output: Raw artifact content string.
        reply_service: Optional reply output service for decorated terminal output.
        json_output: If True, emit JSON via converter agent instead of human UI.
        json_merge: Optional top-level keys merged into successful JSON ``data`` objects.
    """
    if reply_service:
        # Decorate via the reply model; fall back to the deterministic
        # formatter when the service returns nothing.
        decorated = reply_service.format_ask(question, raw_output)
        output = decorated if decorated else dumb_format_ask(raw_output)

        if json_output:
            # Convert decorated text to JSON through the converter agent.
            json_str = reply_service.format_ask_json(output)
            if json_str:
                try:
                    parsed = json_module.loads(json_str)
                    # json_merge keys overwrite any same-named keys the
                    # converter produced.
                    if isinstance(parsed, dict) and json_merge:
                        for key, value in json_merge.items():
                            parsed[key] = value
                    print_json(success=True, data=parsed, error=None)
                    return
                except json_module.JSONDecodeError:
                    # Fall through to the conversion-failure envelope below.
                    _log.warning("JSON converter returned invalid JSON")
            # Reached when the converter returned nothing or invalid JSON.
            print_json(
                success=False,
                data=None,
                error={
                    "code": "JSON_CONVERSION_FAILED",
                    "message": "Failed to convert ask output to JSON",
                    "hint": "Check REPLY_MODEL configuration",
                    "retryable": True,
                },
            )
            return

        # Human-readable path: split decorated output into answer + evidence.
        answer, evidence = parse_ask_response(output)
        render_panel("Answer", answer, style="success")
        if evidence:
            render_table(
                f"Evidence ({len(evidence)} citations)",
                [("Reference", "")],
                [[line] for line in evidence],
            )
    else:
        # No reply service configured: JSON mode cannot proceed, human mode
        # degrades to printing the raw artifact string.
        if json_output:
            print_json(
                success=False,
                data=None,
                error={
                    "code": "REPLY_SERVICE_UNAVAILABLE",
                    "message": "JSON output requires REPLY_MODEL configuration",
                    "hint": "Set REPLY_MODEL, REPLY_MODEL_URL, REPLY_MODEL_API_KEY environment variables",
                    "retryable": False,
                },
            )
            return
        print_info(raw_output)
|
|
141
|
+
|
|
142
|
+
|
|
143
|
+
class AskOrchestrator:
    """Orchestrates the `avos ask` command.

    Pipeline: search -> enrich with diffs -> sanitize -> budget -> synthesize -> ground -> render/fallback.
    Exit codes: 0=success, 1=precondition, 2=hard external error.

    Args:
        memory_client: Avos Memory API client.
        llm_client: LLM synthesis client.
        repo_root: Path to the repository root.
        reply_service: Optional reply output service for decorated terminal output.
        github_client: Optional GitHub client for diff enrichment.
        diff_summary_service: Optional service for summarizing diffs via LLM.
    """

    def __init__(
        self,
        memory_client: AvosMemoryClient,
        llm_client: LLMClient,
        repo_root: Path,
        reply_service: ReplyOutputService | None = None,
        github_client: GitHubClient | None = None,
        diff_summary_service: DiffSummaryService | None = None,
    ) -> None:
        # Injected collaborators (reply/github/diff-summary are optional and
        # gate the enrichment + decorated-output paths).
        self._memory = memory_client
        self._llm = llm_client
        self._repo_root = repo_root
        self._reply_service = reply_service
        self._github_client = github_client
        self._diff_summary_service = diff_summary_service
        # Stateless pipeline services constructed locally.
        self._sanitizer = SanitizationService()
        self._budget = ContextBudgetService()
        self._citation_validator = CitationValidator()
        self._fallback_formatter = QueryFallbackFormatter()

    def run(self, repo_slug: str, question: str, json_output: bool = False) -> int:
        """Execute the ask flow.

        Args:
            repo_slug: Repository identifier in 'org/repo' format.
            question: Natural language question from the user.
            json_output: If True, emit JSON output instead of human UI.

        Returns:
            Exit code: 0 (success/fallback), 1 (precondition), 2 (hard error).
        """
        # Precondition: slug must at least contain a '/' separator.
        if "/" not in repo_slug:
            if json_output:
                print_json(
                    success=False,
                    data=None,
                    error={
                        "code": "REPOSITORY_CONTEXT_ERROR",
                        "message": "Invalid repo slug. Expected 'org/repo'.",
                        "hint": None,
                        "retryable": False,
                    },
                )
            else:
                print_error("[REPOSITORY_CONTEXT_ERROR] Invalid repo slug. Expected 'org/repo'.")
            return 1

        # Precondition: repo must have been connected (config present).
        try:
            config = load_config(self._repo_root)
        except ConfigurationNotInitializedError as e:
            if json_output:
                print_json(
                    success=False,
                    data=None,
                    error={
                        "code": "CONFIG_NOT_INITIALIZED",
                        "message": str(e),
                        "hint": "Run 'avos connect org/repo' first.",
                        "retryable": False,
                    },
                )
            else:
                print_error(f"[CONFIG_NOT_INITIALIZED] {e}")
            return 1
        except AvosError as e:
            # Any other config-layer failure is still a precondition error.
            if json_output:
                print_json(
                    success=False,
                    data=None,
                    error={
                        "code": e.code,
                        "message": str(e),
                        "hint": getattr(e, "hint", None),
                        "retryable": getattr(e, "retryable", False),
                    },
                )
            else:
                print_error(f"[{e.code}] {e}")
            return 1

        memory_id = config.memory_id

        # Stage 1: Retrieve. A memory API failure is a hard error (exit 2).
        try:
            search_result = self._memory.search(
                memory_id=memory_id, query=question, k=_ASK_K, mode=_ASK_SEARCH_MODE
            )
        except AvosError as e:
            if json_output:
                print_json(
                    success=False,
                    data=None,
                    error={
                        "code": e.code,
                        "message": f"Memory search failed: {e}",
                        "hint": getattr(e, "hint", None),
                        "retryable": getattr(e, "retryable", True),
                    },
                )
            else:
                print_error(f"[{e.code}] Memory search failed: {e}")
            return 2

        # Stage 2: Empty check — no hits is a successful "no evidence" answer.
        if not search_result.results:
            if json_output:
                print_json(
                    success=True,
                    data={
                        "format": "avos.ask.v1",
                        "raw_text": "",
                        "answer": {"text": "No matching evidence found in repository memory."},
                        "evidence": {"is_none": True, "items": [], "unparsed_lines": []},
                        "parse_warnings": [],
                    },
                    error=None,
                )
            else:
                print_info(
                    "No matching evidence found in repository memory. Try a different question or ingest more data."
                )
            return 0

        # Convert to internal model
        artifacts = [
            RetrievedArtifact(
                note_id=hit.note_id,
                content=hit.content,
                created_at=hit.created_at,
                rank=hit.rank,
            )
            for hit in search_result.results
        ]

        # Stage 2.5: Diff enrichment (graceful skip — None means "keep originals").
        enriched_artifacts = self._enrich_with_diffs(
            search_result.results, artifacts, repo_slug
        )
        if enriched_artifacts is not None:
            artifacts = enriched_artifacts

        # Stage 3: Sanitize
        sanitization_result = self._sanitizer.sanitize(artifacts)

        # Low sanitization confidence blocks synthesis entirely and falls
        # back to raw evidence, with an explanation of why the gate fired.
        if sanitization_result.confidence_score < _SANITIZATION_CONFIDENCE_THRESHOLD:
            _log.warning(
                "Sanitization confidence %d below threshold %d",
                sanitization_result.confidence_score,
                _SANITIZATION_CONFIDENCE_THRESHOLD,
            )
            fallback_output = self._fallback_formatter.format_ask_fallback(
                sanitization_result.artifacts, FallbackReason.SAFETY_BLOCK
            )
            headline, detail_lines, json_merge = explain_sanitization_gate(
                sanitization_result, _SANITIZATION_CONFIDENCE_THRESHOLD
            )
            if not json_output:
                print_warning(headline)
                for line in detail_lines:
                    print_info(line)
            # json_merge carries the gate diagnostics into the JSON payload.
            _render_reply_output(
                question,
                fallback_output,
                self._reply_service,
                json_output,
                json_merge=json_merge,
            )
            return 0

        # Stage 4: Budget pack
        budget_result = self._budget.pack(sanitization_result.artifacts, mode="ask")

        # Too few artifacts fit the budget to produce a grounded answer.
        if budget_result.included_count < _MIN_GROUNDED_CITATIONS:
            fallback_output = self._fallback_formatter.format_ask_fallback(
                sanitization_result.artifacts, FallbackReason.BUDGET_EXHAUSTED
            )
            if not json_output:
                print_warning("Insufficient evidence for synthesis.")
            _render_reply_output(question, fallback_output, self._reply_service, json_output)
            return 0

        # Stage 5: Synthesize. LLM failure degrades to raw evidence (exit 0).
        try:
            synthesis_request = SynthesisRequest(
                mode=QueryMode.ASK,
                query=question,
                provider=config.llm.provider,
                model=config.llm.model,
                prompt_template_version="ask_v1",
                artifacts=budget_result.included,
            )
            synthesis_response = self._llm.synthesize(synthesis_request)
        except LLMSynthesisError as e:
            _log.warning("LLM synthesis failed: %s", e)
            fallback_output = self._fallback_formatter.format_ask_fallback(
                sanitization_result.artifacts, FallbackReason.LLM_UNAVAILABLE
            )
            if not json_output:
                print_warning("LLM synthesis unavailable. Showing raw evidence.")
            _render_reply_output(question, fallback_output, self._reply_service, json_output)
            return 0

        # Stage 6: Validate citations against the packed artifacts.
        grounded, dropped, warnings = self._citation_validator.validate(
            synthesis_response.answer_text, budget_result.included
        )

        # An answer with too few grounded citations is not trusted.
        if len(grounded) < _MIN_GROUNDED_CITATIONS:
            _log.warning(
                "Grounding failed: %d/%d citations grounded",
                len(grounded),
                len(grounded) + len(dropped),
            )
            fallback_output = self._fallback_formatter.format_ask_fallback(
                sanitization_result.artifacts, FallbackReason.GROUNDING_FAILED
            )
            if not json_output:
                print_warning("Citation grounding insufficient. Showing raw evidence.")
            _render_reply_output(question, fallback_output, self._reply_service, json_output)
            return 0

        # Stage 7: Render
        if not json_output:
            for w in warnings:
                print_warning(w)

        if self._reply_service:
            # Decorated path: reply layer formats the packed raw evidence.
            raw_output = _build_raw_output(budget_result.included)
            _render_reply_output(question, raw_output, self._reply_service, json_output)
        else:
            # Undecorated path: JSON mode needs the reply service, so it errors;
            # human mode renders the synthesized answer directly.
            if json_output:
                print_json(
                    success=False,
                    data=None,
                    error={
                        "code": "REPLY_SERVICE_UNAVAILABLE",
                        "message": "JSON output requires REPLY_MODEL configuration",
                        "hint": "Set REPLY_MODEL, REPLY_MODEL_URL, REPLY_MODEL_API_KEY environment variables",
                        "retryable": False,
                    },
                )
                return 0

            answer_text = synthesis_response.answer_text
            # If the model returned a JSON object with an "answer" key,
            # unwrap it; otherwise keep the raw text.
            try:
                parsed = json_module.loads(answer_text)
                if isinstance(parsed, dict) and "answer" in parsed:
                    answer_text = parsed["answer"]
            except (json_module.JSONDecodeError, TypeError):
                pass

            render_panel("Answer", answer_text, style="success")

            if grounded:
                evidence_rows: list[list[str]] = []
                for cit in grounded:
                    # Truncate long note IDs for table display.
                    note_id_short = (
                        cit.note_id[:10] + ".." if len(cit.note_id) > 12 else cit.note_id
                    )
                    evidence_rows.append([note_id_short, cit.display_label])
                render_table(
                    f"Evidence ({len(grounded)} citations)",
                    [("Note ID", "dim"), ("Label", "")],
                    evidence_rows,
                )

        return 0

    def _enrich_with_diffs(
        self,
        hits: list[SearchHit],
        artifacts: list[RetrievedArtifact],
        repo_slug: str,
    ) -> list[RetrievedArtifact] | None:
        """Enrich artifacts with git diff summaries.

        Extracts PR/commit references from search hits, fetches diffs via GitHub API,
        summarizes them via the diff summary service, and injects summaries into
        artifact content.

        Args:
            hits: Original search hits from memory API.
            artifacts: Converted RetrievedArtifact list.
            repo_slug: Repository slug for reference resolution.

        Returns:
            Enriched artifacts list, or None if enrichment should be skipped.
        """
        # Enrichment is optional: both collaborators must be configured.
        if self._github_client is None or self._diff_summary_service is None:
            _log.debug("Diff enrichment skipped: missing github_client or diff_summary_service")
            return None

        try:
            note_refs_list = extract_refs_by_note(hits)

            # Flatten references while remembering which note each came from.
            all_refs: list[str] = []
            note_id_to_refs: dict[str, list[str]] = {}
            for note_refs in note_refs_list:
                note_id_to_refs[note_refs.note_id] = note_refs.references
                all_refs.extend(note_refs.references)

            if not all_refs:
                _log.debug("No PR/commit references found in artifacts")
                return None

            parser = ReferenceParser()
            parsed_refs = parser.parse_all(all_refs, repo_slug)

            if not parsed_refs:
                _log.debug("No valid references parsed")
                return None

            resolver = DiffResolver(self._github_client)
            diff_results = resolver.resolve(parsed_refs)

            # Only successfully resolved diffs are summarized.
            resolved_diffs = [r for r in diff_results if r.status == DiffStatus.RESOLVED]
            if not resolved_diffs:
                _log.debug("No diffs resolved successfully")
                return None

            summaries = self._diff_summary_service.summarize_diffs(resolved_diffs)
            if not summaries:
                _log.debug("No diff summaries generated")
                return None

            # Presumably keys are canonical ref IDs (e.g. 'PR #123' or a SHA,
            # per _ref_matches_canonical) mapping to summary text — the
            # matching loop below relies on that shape.
            canonical_to_summary: dict[str, str] = summaries

            enriched: list[RetrievedArtifact] = []
            for artifact in artifacts:
                refs_for_note = note_id_to_refs.get(artifact.note_id, [])
                summary_parts: list[str] = []

                # Match each of this note's references to at most one summary.
                for ref_str in refs_for_note:
                    parsed = parser.parse(ref_str, repo_slug)
                    if parsed is None:
                        continue
                    for canonical_id, summary in canonical_to_summary.items():
                        if self._ref_matches_canonical(parsed, canonical_id):
                            summary_parts.append(summary)
                            break

                if summary_parts:
                    # Append summaries after the original content, clearly delimited.
                    combined_summary = "\n\n".join(summary_parts)
                    new_content = (
                        f"{artifact.content}\n\n--- Diff Summary ---\n{combined_summary}"
                    )
                    enriched.append(
                        RetrievedArtifact(
                            note_id=artifact.note_id,
                            content=new_content,
                            created_at=artifact.created_at,
                            rank=artifact.rank,
                        )
                    )
                else:
                    enriched.append(artifact)

            return enriched

        except Exception as e:
            # Best-effort enrichment: any failure degrades to the original
            # artifacts rather than aborting the ask flow.
            _log.warning("Diff enrichment failed: %s", e)
            return None

    def _ref_matches_canonical(self, parsed: object, canonical_id: str) -> bool:
        """Check if a parsed reference matches a canonical ID.

        Args:
            parsed: ParsedReference object.
            canonical_id: Canonical ID like 'PR #123' or full SHA.

        Returns:
            True if the reference matches.
        """
        # Local import avoids a module-level cycle with models.diff.
        from avos_cli.models.diff import DiffReferenceType, ParsedReference

        if not isinstance(parsed, ParsedReference):
            return False

        if parsed.reference_type == DiffReferenceType.PR:
            return canonical_id == f"PR #{parsed.raw_id}"
        else:
            # SHA match: either side may be abbreviated, so test prefix
            # containment in both directions (7 chars is the short-SHA length).
            return canonical_id.startswith(parsed.raw_id) or parsed.raw_id.startswith(
                canonical_id[:7]
            )