codebase-intel 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- codebase_intel/__init__.py +3 -0
- codebase_intel/analytics/__init__.py +1 -0
- codebase_intel/analytics/benchmark.py +406 -0
- codebase_intel/analytics/feedback.py +496 -0
- codebase_intel/analytics/tracker.py +439 -0
- codebase_intel/cli/__init__.py +1 -0
- codebase_intel/cli/main.py +740 -0
- codebase_intel/contracts/__init__.py +1 -0
- codebase_intel/contracts/auto_generator.py +438 -0
- codebase_intel/contracts/evaluator.py +531 -0
- codebase_intel/contracts/models.py +433 -0
- codebase_intel/contracts/registry.py +225 -0
- codebase_intel/core/__init__.py +1 -0
- codebase_intel/core/config.py +248 -0
- codebase_intel/core/exceptions.py +454 -0
- codebase_intel/core/types.py +375 -0
- codebase_intel/decisions/__init__.py +1 -0
- codebase_intel/decisions/miner.py +297 -0
- codebase_intel/decisions/models.py +302 -0
- codebase_intel/decisions/store.py +411 -0
- codebase_intel/drift/__init__.py +1 -0
- codebase_intel/drift/detector.py +443 -0
- codebase_intel/graph/__init__.py +1 -0
- codebase_intel/graph/builder.py +391 -0
- codebase_intel/graph/parser.py +1232 -0
- codebase_intel/graph/query.py +377 -0
- codebase_intel/graph/storage.py +736 -0
- codebase_intel/mcp/__init__.py +1 -0
- codebase_intel/mcp/server.py +710 -0
- codebase_intel/orchestrator/__init__.py +1 -0
- codebase_intel/orchestrator/assembler.py +649 -0
- codebase_intel-0.1.0.dist-info/METADATA +361 -0
- codebase_intel-0.1.0.dist-info/RECORD +36 -0
- codebase_intel-0.1.0.dist-info/WHEEL +4 -0
- codebase_intel-0.1.0.dist-info/entry_points.txt +2 -0
- codebase_intel-0.1.0.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,496 @@
|
|
|
1
|
+
"""Context quality feedback loop — learns from agent output acceptance/rejection.
|
|
2
|
+
|
|
3
|
+
THIS IS THE MOAT. No other tool does this.
|
|
4
|
+
|
|
5
|
+
The problem: every context tool sends context and hopes for the best.
|
|
6
|
+
Nobody tracks whether the context actually helped. Did the agent's output
|
|
7
|
+
get accepted? Rejected? Modified heavily?
|
|
8
|
+
|
|
9
|
+
This module closes the loop:
|
|
10
|
+
1. When context is assembled, record a "session"
|
|
11
|
+
2. When the agent's output is reviewed (accepted/rejected/modified), record feedback
|
|
12
|
+
3. Over time, learn which context patterns lead to acceptance:
|
|
13
|
+
- "Decisions about auth always improve agent output" → boost priority
|
|
14
|
+
- "The AI guardrails contract catches 80% of rejections" → prove value
|
|
15
|
+
- "Files from the graph's depth-2 traversal are rarely useful" → trim context
|
|
16
|
+
4. Surface insights: "Your AI output acceptance rate improved from 62% to 84%
|
|
17
|
+
after adding quality contracts"
|
|
18
|
+
|
|
19
|
+
This is tracked per-project and powers the dashboard.
|
|
20
|
+
"""
|
|
21
|
+
|
|
22
|
+
from __future__ import annotations

import json
import sqlite3
from datetime import UTC, datetime, timedelta
from enum import Enum
from pathlib import Path
from typing import Any
|
31
|
+
|
|
32
|
+
class FeedbackType(str, Enum):
    """How the agent's output was received.

    Subclasses ``str`` so members compare equal to their raw string values
    and store directly into the SQLite TEXT ``feedback_type`` column.
    """

    ACCEPTED = "accepted"  # Output used as-is or with minor edits
    MODIFIED = "modified"  # Output used but significantly changed
    REJECTED = "rejected"  # Output discarded entirely
    PARTIAL = "partial"  # Some parts used, some discarded
class RejectionReason(str, Enum):
    """Why the output was rejected — maps to improvement actions.

    Each member corresponds to an actionable suggestion in
    ``FeedbackTracker._suggestion_for_reason``; values are stored in the
    ``rejection_reason`` TEXT column of ``feedback_events``.
    """

    WRONG_PATTERN = "wrong_pattern"  # Used a pattern the project doesn't follow
    MISSING_CONTEXT = "missing_context"  # Didn't know about a dependency/constraint
    VIOLATED_DECISION = "violated_decision"  # Contradicted an existing decision
    HALLUCINATED = "hallucinated"  # Referenced non-existent API/module
    OVER_ENGINEERED = "over_engineered"  # Too complex for the task
    SECURITY_ISSUE = "security_issue"  # Introduced a vulnerability
    WRONG_APPROACH = "wrong_approach"  # Fundamentally wrong solution
    OTHER = "other"  # Catch-all; gets a generic suggestion
# Idempotent DDL executed by FeedbackTracker._get_conn() on first connection.
# Tables:
#   context_sessions  — one row per context-assembly session, keyed by session_id
#   feedback_events   — one row per piece of feedback about a session
#   learning_insights — auto-generated "pattern detected" observations
#   acceptance_trend  — one row per day of aggregated acceptance counts
# Timestamps are stored as TEXT (datetime.isoformat()); JSON payloads
# (files_involved, files_affected, metadata_json) are stored as TEXT too.
FEEDBACK_SCHEMA = """
CREATE TABLE IF NOT EXISTS context_sessions (
    session_id TEXT PRIMARY KEY,
    timestamp TEXT NOT NULL,
    task_description TEXT NOT NULL,
    files_involved TEXT NOT NULL DEFAULT '[]',
    context_event_id INTEGER,
    tokens_used INTEGER NOT NULL DEFAULT 0,
    decisions_provided INTEGER NOT NULL DEFAULT 0,
    contracts_provided INTEGER NOT NULL DEFAULT 0
);

CREATE TABLE IF NOT EXISTS feedback_events (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    session_id TEXT NOT NULL REFERENCES context_sessions(session_id),
    timestamp TEXT NOT NULL,
    feedback_type TEXT NOT NULL,
    rejection_reason TEXT,
    details TEXT,
    files_affected TEXT DEFAULT '[]',
    improvement_suggestion TEXT
);

CREATE TABLE IF NOT EXISTS learning_insights (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    timestamp TEXT NOT NULL,
    insight_type TEXT NOT NULL,
    description TEXT NOT NULL,
    confidence REAL NOT NULL DEFAULT 0.5,
    action_taken TEXT,
    metadata_json TEXT DEFAULT '{}'
);

CREATE TABLE IF NOT EXISTS acceptance_trend (
    date TEXT PRIMARY KEY,
    total_sessions INTEGER NOT NULL DEFAULT 0,
    accepted INTEGER NOT NULL DEFAULT 0,
    modified INTEGER NOT NULL DEFAULT 0,
    rejected INTEGER NOT NULL DEFAULT 0,
    partial INTEGER NOT NULL DEFAULT 0,
    acceptance_rate REAL NOT NULL DEFAULT 0
);
"""
class FeedbackTracker:
    """Tracks context quality and learns from agent output reception."""

    def __init__(self, db_path: Path) -> None:
        """Prepare the tracker; the SQLite database is opened lazily.

        Args:
            db_path: Location of the feedback database file. Missing parent
                directories are created immediately.
        """
        db_path.parent.mkdir(parents=True, exist_ok=True)
        self._db_path = db_path
        # Opened on first use by _get_conn(), released by close().
        self._conn: sqlite3.Connection | None = None
def _get_conn(self) -> sqlite3.Connection:
|
|
108
|
+
if self._conn is None:
|
|
109
|
+
self._conn = sqlite3.connect(str(self._db_path))
|
|
110
|
+
self._conn.row_factory = sqlite3.Row
|
|
111
|
+
self._conn.executescript(FEEDBACK_SCHEMA)
|
|
112
|
+
return self._conn
|
|
113
|
+
|
|
114
|
+
def close(self) -> None:
|
|
115
|
+
if self._conn:
|
|
116
|
+
self._conn.close()
|
|
117
|
+
self._conn = None
|
|
118
|
+
|
|
119
|
+
def start_session(
|
|
120
|
+
self,
|
|
121
|
+
session_id: str,
|
|
122
|
+
task_description: str,
|
|
123
|
+
files_involved: list[str],
|
|
124
|
+
context_event_id: int | None = None,
|
|
125
|
+
tokens_used: int = 0,
|
|
126
|
+
decisions_provided: int = 0,
|
|
127
|
+
contracts_provided: int = 0,
|
|
128
|
+
) -> None:
|
|
129
|
+
"""Record that context was provided to an agent."""
|
|
130
|
+
conn = self._get_conn()
|
|
131
|
+
conn.execute(
|
|
132
|
+
"""
|
|
133
|
+
INSERT OR REPLACE INTO context_sessions (
|
|
134
|
+
session_id, timestamp, task_description, files_involved,
|
|
135
|
+
context_event_id, tokens_used, decisions_provided, contracts_provided
|
|
136
|
+
) VALUES (?, ?, ?, ?, ?, ?, ?, ?)
|
|
137
|
+
""",
|
|
138
|
+
(
|
|
139
|
+
session_id,
|
|
140
|
+
datetime.now(UTC).isoformat(),
|
|
141
|
+
task_description[:500],
|
|
142
|
+
json.dumps(files_involved[:20]),
|
|
143
|
+
context_event_id,
|
|
144
|
+
tokens_used,
|
|
145
|
+
decisions_provided,
|
|
146
|
+
contracts_provided,
|
|
147
|
+
),
|
|
148
|
+
)
|
|
149
|
+
conn.commit()
|
|
150
|
+
|
|
151
|
+
def record_feedback(
|
|
152
|
+
self,
|
|
153
|
+
session_id: str,
|
|
154
|
+
feedback_type: FeedbackType,
|
|
155
|
+
rejection_reason: RejectionReason | None = None,
|
|
156
|
+
details: str | None = None,
|
|
157
|
+
files_affected: list[str] | None = None,
|
|
158
|
+
improvement_suggestion: str | None = None,
|
|
159
|
+
) -> None:
|
|
160
|
+
"""Record how the agent's output was received.
|
|
161
|
+
|
|
162
|
+
This is called by the MCP tool `record_feedback` — the agent or
|
|
163
|
+
user reports whether the generated code was useful.
|
|
164
|
+
"""
|
|
165
|
+
conn = self._get_conn()
|
|
166
|
+
conn.execute(
|
|
167
|
+
"""
|
|
168
|
+
INSERT INTO feedback_events (
|
|
169
|
+
session_id, timestamp, feedback_type, rejection_reason,
|
|
170
|
+
details, files_affected, improvement_suggestion
|
|
171
|
+
) VALUES (?, ?, ?, ?, ?, ?, ?)
|
|
172
|
+
""",
|
|
173
|
+
(
|
|
174
|
+
session_id,
|
|
175
|
+
datetime.now(UTC).isoformat(),
|
|
176
|
+
feedback_type.value,
|
|
177
|
+
rejection_reason.value if rejection_reason else None,
|
|
178
|
+
details,
|
|
179
|
+
json.dumps(files_affected or []),
|
|
180
|
+
improvement_suggestion,
|
|
181
|
+
),
|
|
182
|
+
)
|
|
183
|
+
conn.commit()
|
|
184
|
+
self._update_acceptance_trend()
|
|
185
|
+
|
|
186
|
+
# Check if we can generate a learning insight
|
|
187
|
+
if feedback_type == FeedbackType.REJECTED and rejection_reason:
|
|
188
|
+
self._maybe_generate_insight(session_id, rejection_reason, details)
|
|
189
|
+
|
|
190
|
+
def get_acceptance_rate(self) -> dict[str, Any]:
|
|
191
|
+
"""Get overall and recent acceptance rates.
|
|
192
|
+
|
|
193
|
+
This is the headline number: "Your AI output acceptance rate is 84%"
|
|
194
|
+
"""
|
|
195
|
+
conn = self._get_conn()
|
|
196
|
+
|
|
197
|
+
# Overall
|
|
198
|
+
total = conn.execute("SELECT COUNT(*) FROM feedback_events").fetchone()[0]
|
|
199
|
+
accepted = conn.execute(
|
|
200
|
+
"SELECT COUNT(*) FROM feedback_events WHERE feedback_type IN ('accepted', 'modified')"
|
|
201
|
+
).fetchone()[0]
|
|
202
|
+
|
|
203
|
+
overall_rate = (accepted / total * 100) if total > 0 else 0
|
|
204
|
+
|
|
205
|
+
# Last 7 days
|
|
206
|
+
recent = conn.execute(
|
|
207
|
+
"""
|
|
208
|
+
SELECT
|
|
209
|
+
COUNT(*) as total,
|
|
210
|
+
SUM(CASE WHEN feedback_type IN ('accepted', 'modified') THEN 1 ELSE 0 END) as accepted
|
|
211
|
+
FROM feedback_events
|
|
212
|
+
WHERE timestamp >= datetime('now', '-7 days')
|
|
213
|
+
"""
|
|
214
|
+
).fetchone()
|
|
215
|
+
recent_total = recent["total"]
|
|
216
|
+
recent_accepted = recent["accepted"] or 0
|
|
217
|
+
recent_rate = (recent_accepted / recent_total * 100) if recent_total > 0 else 0
|
|
218
|
+
|
|
219
|
+
return {
|
|
220
|
+
"overall": {
|
|
221
|
+
"total_sessions": total,
|
|
222
|
+
"acceptance_rate": round(overall_rate, 1),
|
|
223
|
+
},
|
|
224
|
+
"last_7_days": {
|
|
225
|
+
"total_sessions": recent_total,
|
|
226
|
+
"acceptance_rate": round(recent_rate, 1),
|
|
227
|
+
},
|
|
228
|
+
"improvement": round(recent_rate - overall_rate, 1) if total > 10 else None,
|
|
229
|
+
}
|
|
230
|
+
|
|
231
|
+
def get_rejection_analysis(self) -> dict[str, Any]:
|
|
232
|
+
"""Analyze why outputs are rejected — this drives improvement.
|
|
233
|
+
|
|
234
|
+
"43% of rejections are because the agent used wrong patterns.
|
|
235
|
+
Add 2 more contract rules to fix this."
|
|
236
|
+
"""
|
|
237
|
+
conn = self._get_conn()
|
|
238
|
+
total_rejections = conn.execute(
|
|
239
|
+
"SELECT COUNT(*) FROM feedback_events WHERE feedback_type = 'rejected'"
|
|
240
|
+
).fetchone()[0]
|
|
241
|
+
|
|
242
|
+
if total_rejections == 0:
|
|
243
|
+
return {"total_rejections": 0, "reasons": []}
|
|
244
|
+
|
|
245
|
+
rows = conn.execute(
|
|
246
|
+
"""
|
|
247
|
+
SELECT rejection_reason, COUNT(*) as count
|
|
248
|
+
FROM feedback_events
|
|
249
|
+
WHERE feedback_type = 'rejected' AND rejection_reason IS NOT NULL
|
|
250
|
+
GROUP BY rejection_reason
|
|
251
|
+
ORDER BY count DESC
|
|
252
|
+
"""
|
|
253
|
+
).fetchall()
|
|
254
|
+
|
|
255
|
+
reasons = []
|
|
256
|
+
for row in rows:
|
|
257
|
+
reason = row["rejection_reason"]
|
|
258
|
+
count = row["count"]
|
|
259
|
+
pct = count / total_rejections * 100
|
|
260
|
+
|
|
261
|
+
# Map rejection reasons to actionable suggestions
|
|
262
|
+
suggestion = self._suggestion_for_reason(reason)
|
|
263
|
+
|
|
264
|
+
reasons.append({
|
|
265
|
+
"reason": reason,
|
|
266
|
+
"count": count,
|
|
267
|
+
"percentage": round(pct, 1),
|
|
268
|
+
"suggestion": suggestion,
|
|
269
|
+
})
|
|
270
|
+
|
|
271
|
+
return {
|
|
272
|
+
"total_rejections": total_rejections,
|
|
273
|
+
"reasons": reasons,
|
|
274
|
+
}
|
|
275
|
+
|
|
276
|
+
def get_insights(self, limit: int = 10) -> list[dict[str, Any]]:
|
|
277
|
+
"""Get generated learning insights."""
|
|
278
|
+
conn = self._get_conn()
|
|
279
|
+
rows = conn.execute(
|
|
280
|
+
"SELECT * FROM learning_insights ORDER BY timestamp DESC LIMIT ?",
|
|
281
|
+
(limit,),
|
|
282
|
+
).fetchall()
|
|
283
|
+
return [dict(row) for row in rows]
|
|
284
|
+
|
|
285
|
+
def get_trend(self, days: int = 30) -> list[dict[str, Any]]:
|
|
286
|
+
"""Get acceptance rate trend over time."""
|
|
287
|
+
conn = self._get_conn()
|
|
288
|
+
rows = conn.execute(
|
|
289
|
+
"SELECT * FROM acceptance_trend ORDER BY date DESC LIMIT ?",
|
|
290
|
+
(days,),
|
|
291
|
+
).fetchall()
|
|
292
|
+
return [dict(row) for row in reversed(rows)]
|
|
293
|
+
|
|
294
|
+
def get_context_effectiveness(self) -> dict[str, Any]:
|
|
295
|
+
"""Which context types correlate with acceptance?
|
|
296
|
+
|
|
297
|
+
"Sessions with decisions provided had 91% acceptance rate.
|
|
298
|
+
Sessions without decisions had 64% acceptance rate."
|
|
299
|
+
|
|
300
|
+
This proves the value of each component.
|
|
301
|
+
"""
|
|
302
|
+
conn = self._get_conn()
|
|
303
|
+
|
|
304
|
+
# Sessions with decisions
|
|
305
|
+
with_decisions = conn.execute(
|
|
306
|
+
"""
|
|
307
|
+
SELECT
|
|
308
|
+
COUNT(DISTINCT cs.session_id) as total,
|
|
309
|
+
SUM(CASE WHEN fe.feedback_type IN ('accepted', 'modified') THEN 1 ELSE 0 END) as accepted
|
|
310
|
+
FROM context_sessions cs
|
|
311
|
+
JOIN feedback_events fe ON cs.session_id = fe.session_id
|
|
312
|
+
WHERE cs.decisions_provided > 0
|
|
313
|
+
"""
|
|
314
|
+
).fetchone()
|
|
315
|
+
|
|
316
|
+
# Sessions without decisions
|
|
317
|
+
without_decisions = conn.execute(
|
|
318
|
+
"""
|
|
319
|
+
SELECT
|
|
320
|
+
COUNT(DISTINCT cs.session_id) as total,
|
|
321
|
+
SUM(CASE WHEN fe.feedback_type IN ('accepted', 'modified') THEN 1 ELSE 0 END) as accepted
|
|
322
|
+
FROM context_sessions cs
|
|
323
|
+
JOIN feedback_events fe ON cs.session_id = fe.session_id
|
|
324
|
+
WHERE cs.decisions_provided = 0
|
|
325
|
+
"""
|
|
326
|
+
).fetchone()
|
|
327
|
+
|
|
328
|
+
# Sessions with contracts
|
|
329
|
+
with_contracts = conn.execute(
|
|
330
|
+
"""
|
|
331
|
+
SELECT
|
|
332
|
+
COUNT(DISTINCT cs.session_id) as total,
|
|
333
|
+
SUM(CASE WHEN fe.feedback_type IN ('accepted', 'modified') THEN 1 ELSE 0 END) as accepted
|
|
334
|
+
FROM context_sessions cs
|
|
335
|
+
JOIN feedback_events fe ON cs.session_id = fe.session_id
|
|
336
|
+
WHERE cs.contracts_provided > 0
|
|
337
|
+
"""
|
|
338
|
+
).fetchone()
|
|
339
|
+
|
|
340
|
+
def _rate(row: Any) -> float:
|
|
341
|
+
t = row["total"] or 0
|
|
342
|
+
a = row["accepted"] or 0
|
|
343
|
+
return round(a / t * 100, 1) if t > 0 else 0
|
|
344
|
+
|
|
345
|
+
return {
|
|
346
|
+
"with_decisions": {
|
|
347
|
+
"sessions": with_decisions["total"] or 0,
|
|
348
|
+
"acceptance_rate": _rate(with_decisions),
|
|
349
|
+
},
|
|
350
|
+
"without_decisions": {
|
|
351
|
+
"sessions": without_decisions["total"] or 0,
|
|
352
|
+
"acceptance_rate": _rate(without_decisions),
|
|
353
|
+
},
|
|
354
|
+
"with_contracts": {
|
|
355
|
+
"sessions": with_contracts["total"] or 0,
|
|
356
|
+
"acceptance_rate": _rate(with_contracts),
|
|
357
|
+
},
|
|
358
|
+
"insight": self._generate_effectiveness_insight(
|
|
359
|
+
with_decisions, without_decisions, with_contracts
|
|
360
|
+
),
|
|
361
|
+
}
|
|
362
|
+
|
|
363
|
+
# -------------------------------------------------------------------
|
|
364
|
+
# Internal
|
|
365
|
+
# -------------------------------------------------------------------
|
|
366
|
+
|
|
367
|
+
def _maybe_generate_insight(
|
|
368
|
+
self,
|
|
369
|
+
session_id: str,
|
|
370
|
+
reason: RejectionReason,
|
|
371
|
+
details: str | None,
|
|
372
|
+
) -> None:
|
|
373
|
+
"""Check if we have enough data to generate a learning insight."""
|
|
374
|
+
conn = self._get_conn()
|
|
375
|
+
|
|
376
|
+
# Count recent rejections for this reason
|
|
377
|
+
count = conn.execute(
|
|
378
|
+
"""
|
|
379
|
+
SELECT COUNT(*) FROM feedback_events
|
|
380
|
+
WHERE rejection_reason = ? AND timestamp >= datetime('now', '-30 days')
|
|
381
|
+
""",
|
|
382
|
+
(reason.value,),
|
|
383
|
+
).fetchone()[0]
|
|
384
|
+
|
|
385
|
+
# Need at least 3 instances to generate an insight
|
|
386
|
+
if count >= 3:
|
|
387
|
+
suggestion = self._suggestion_for_reason(reason.value)
|
|
388
|
+
conn.execute(
|
|
389
|
+
"""
|
|
390
|
+
INSERT INTO learning_insights (timestamp, insight_type, description, confidence, action_taken)
|
|
391
|
+
VALUES (?, ?, ?, ?, ?)
|
|
392
|
+
""",
|
|
393
|
+
(
|
|
394
|
+
datetime.now(UTC).isoformat(),
|
|
395
|
+
"pattern_detected",
|
|
396
|
+
f"Repeated rejection reason: {reason.value} ({count} times in 30 days). {suggestion}",
|
|
397
|
+
min(0.9, 0.3 + count * 0.1),
|
|
398
|
+
None,
|
|
399
|
+
),
|
|
400
|
+
)
|
|
401
|
+
conn.commit()
|
|
402
|
+
|
|
403
|
+
def _suggestion_for_reason(self, reason: str) -> str:
|
|
404
|
+
"""Map rejection reasons to actionable improvement suggestions."""
|
|
405
|
+
suggestions = {
|
|
406
|
+
"wrong_pattern": (
|
|
407
|
+
"Add a quality contract with the correct pattern as an example. "
|
|
408
|
+
"The agent needs to see DO/DON'T examples for your project's conventions."
|
|
409
|
+
),
|
|
410
|
+
"missing_context": (
|
|
411
|
+
"Create a decision record for the missing context. "
|
|
412
|
+
"If it's a dependency, check that the code graph captured the relationship."
|
|
413
|
+
),
|
|
414
|
+
"violated_decision": (
|
|
415
|
+
"The decision exists but wasn't surfaced. Check the decision's "
|
|
416
|
+
"code_anchors — they may need updating after recent refactors."
|
|
417
|
+
),
|
|
418
|
+
"hallucinated": (
|
|
419
|
+
"Enable the 'no-hallucinated-imports' AI guardrail contract. "
|
|
420
|
+
"The graph can verify import targets exist before the agent uses them."
|
|
421
|
+
),
|
|
422
|
+
"over_engineered": (
|
|
423
|
+
"Enable the 'no-over-abstraction' AI guardrail. "
|
|
424
|
+
"Consider adding a contract rule: 'no base classes with single implementation.'"
|
|
425
|
+
),
|
|
426
|
+
"security_issue": (
|
|
427
|
+
"Add security-focused contract rules. Check the community "
|
|
428
|
+
"contract packs for your framework."
|
|
429
|
+
),
|
|
430
|
+
"wrong_approach": (
|
|
431
|
+
"Create a decision record documenting the correct approach "
|
|
432
|
+
"and why the alternative was rejected."
|
|
433
|
+
),
|
|
434
|
+
}
|
|
435
|
+
return suggestions.get(reason, "Review the context assembly for this task type.")
|
|
436
|
+
|
|
437
|
+
def _generate_effectiveness_insight(
|
|
438
|
+
self, with_dec: Any, without_dec: Any, with_contracts: Any
|
|
439
|
+
) -> str:
|
|
440
|
+
"""Generate a human-readable insight about context effectiveness."""
|
|
441
|
+
parts = []
|
|
442
|
+
wd_total = with_dec["total"] or 0
|
|
443
|
+
wod_total = without_dec["total"] or 0
|
|
444
|
+
|
|
445
|
+
if wd_total >= 5 and wod_total >= 5:
|
|
446
|
+
wd_rate = (with_dec["accepted"] or 0) / wd_total * 100
|
|
447
|
+
wod_rate = (without_dec["accepted"] or 0) / wod_total * 100
|
|
448
|
+
diff = wd_rate - wod_rate
|
|
449
|
+
if diff > 5:
|
|
450
|
+
parts.append(
|
|
451
|
+
f"Sessions with decisions had {diff:.0f}% higher acceptance rate."
|
|
452
|
+
)
|
|
453
|
+
|
|
454
|
+
wc_total = with_contracts["total"] or 0
|
|
455
|
+
if wc_total >= 5:
|
|
456
|
+
wc_rate = (with_contracts["accepted"] or 0) / wc_total * 100
|
|
457
|
+
parts.append(f"Contract-guided sessions: {wc_rate:.0f}% acceptance rate.")
|
|
458
|
+
|
|
459
|
+
return " ".join(parts) if parts else "Not enough data yet for insights."
|
|
460
|
+
|
|
461
|
+
def _update_acceptance_trend(self) -> None:
|
|
462
|
+
"""Refresh today's acceptance trend entry."""
|
|
463
|
+
conn = self._get_conn()
|
|
464
|
+
today = datetime.now(UTC).strftime("%Y-%m-%d")
|
|
465
|
+
|
|
466
|
+
row = conn.execute(
|
|
467
|
+
"""
|
|
468
|
+
SELECT
|
|
469
|
+
COUNT(*) as total,
|
|
470
|
+
SUM(CASE WHEN feedback_type = 'accepted' THEN 1 ELSE 0 END) as accepted,
|
|
471
|
+
SUM(CASE WHEN feedback_type = 'modified' THEN 1 ELSE 0 END) as modified,
|
|
472
|
+
SUM(CASE WHEN feedback_type = 'rejected' THEN 1 ELSE 0 END) as rejected,
|
|
473
|
+
SUM(CASE WHEN feedback_type = 'partial' THEN 1 ELSE 0 END) as partial
|
|
474
|
+
FROM feedback_events
|
|
475
|
+
WHERE timestamp LIKE ?
|
|
476
|
+
""",
|
|
477
|
+
(f"{today}%",),
|
|
478
|
+
).fetchone()
|
|
479
|
+
|
|
480
|
+
total = row["total"]
|
|
481
|
+
accepted = (row["accepted"] or 0) + (row["modified"] or 0)
|
|
482
|
+
rate = (accepted / total * 100) if total > 0 else 0
|
|
483
|
+
|
|
484
|
+
conn.execute(
|
|
485
|
+
"""
|
|
486
|
+
INSERT INTO acceptance_trend (date, total_sessions, accepted, modified, rejected, partial, acceptance_rate)
|
|
487
|
+
VALUES (?, ?, ?, ?, ?, ?, ?)
|
|
488
|
+
ON CONFLICT(date) DO UPDATE SET
|
|
489
|
+
total_sessions=excluded.total_sessions,
|
|
490
|
+
accepted=excluded.accepted, modified=excluded.modified,
|
|
491
|
+
rejected=excluded.rejected, partial=excluded.partial,
|
|
492
|
+
acceptance_rate=excluded.acceptance_rate
|
|
493
|
+
""",
|
|
494
|
+
(today, total, row["accepted"] or 0, row["modified"] or 0, row["rejected"] or 0, row["partial"] or 0, round(rate, 1)),
|
|
495
|
+
)
|
|
496
|
+
conn.commit()
|