@simbimbo/memory-ocmemog 0.1.16 → 0.1.17

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -18,20 +18,20 @@ _REVIEW_KIND_METADATA: Dict[str, Dict[str, str]] = {
18
18
  "duplicate_candidate": {
19
19
  "relationship": "duplicate_of",
20
20
  "label": "Duplicate candidate",
21
- "approve_label": "Approve duplicate merge",
22
- "reject_label": "Reject duplicate merge",
21
+ "apply_label": "Apply duplicate merge",
22
+ "dismiss_label": "Dismiss duplicate merge",
23
23
  },
24
24
  "contradiction_candidate": {
25
25
  "relationship": "contradicts",
26
26
  "label": "Contradiction candidate",
27
- "approve_label": "Mark as contradiction",
28
- "reject_label": "Dismiss contradiction",
27
+ "apply_label": "Apply contradiction",
28
+ "dismiss_label": "Dismiss contradiction",
29
29
  },
30
30
  "supersession_recommendation": {
31
31
  "relationship": "supersedes",
32
32
  "label": "Supersession recommendation",
33
- "approve_label": "Approve supersession",
34
- "reject_label": "Dismiss supersession",
33
+ "apply_label": "Apply supersession",
34
+ "dismiss_label": "Dismiss supersession",
35
35
  },
36
36
  }
37
37
 
@@ -1021,20 +1021,40 @@ def _plain_english_supersession_summary(
1021
1021
  return summary
1022
1022
 
1023
1023
 
1024
+ def _review_explanation(kind: str, *, signal: float, reason: str, source: Dict[str, Any], target: Dict[str, Any]) -> Dict[str, Any]:
1025
+ source_status = str(source.get("memory_status") or "active")
1026
+ target_status = str(target.get("memory_status") or "active")
1027
+ reason_text = str(reason or "").strip() or "no explicit rationale captured"
1028
+ if kind == "duplicate_candidate":
1029
+ short = f"Possible duplicate merge with signal {signal:.2f}."
1030
+ elif kind == "contradiction_candidate":
1031
+ short = f"Possible contradiction with signal {signal:.2f}."
1032
+ elif kind == "supersession_recommendation":
1033
+ short = f"Possible supersession with signal {signal:.2f}."
1034
+ else:
1035
+ short = f"Governance review item with signal {signal:.2f}."
1036
+ return {
1037
+ "short": short,
1038
+ "reason": reason_text,
1039
+ "source_status": source_status,
1040
+ "target_status": target_status,
1041
+ }
1042
+
1043
+
1024
1044
  def _review_actions(kind: str, relationship: str) -> List[Dict[str, Any]]:
1025
1045
  meta = _REVIEW_KIND_METADATA.get(kind, {})
1026
1046
  return [
1027
1047
  {
1028
- "decision": "approve",
1048
+ "decision": "apply",
1029
1049
  "approved": True,
1030
1050
  "relationship": relationship,
1031
- "label": meta.get("approve_label") or "Approve",
1051
+ "label": meta.get("apply_label") or "Apply",
1032
1052
  },
1033
1053
  {
1034
- "decision": "reject",
1054
+ "decision": "dismiss",
1035
1055
  "approved": False,
1036
1056
  "relationship": relationship,
1037
- "label": meta.get("reject_label") or "Reject",
1057
+ "label": meta.get("dismiss_label") or "Dismiss",
1038
1058
  },
1039
1059
  ]
1040
1060
 
@@ -1047,6 +1067,18 @@ def _relationship_for_review(kind: str | None = None, relationship: str | None =
1047
1067
  return _REVIEW_KIND_METADATA.get(kind_key, {}).get("relationship", "")
1048
1068
 
1049
1069
 
1070
+ def _review_priority_label(priority: int) -> str:
1071
+ if priority >= 90:
1072
+ return "critical"
1073
+ if priority >= 70:
1074
+ return "high"
1075
+ if priority >= 40:
1076
+ return "medium"
1077
+ if priority > 0:
1078
+ return "low"
1079
+ return "none"
1080
+
1081
+
1050
1082
  def list_governance_review_items(
1051
1083
  *,
1052
1084
  categories: Optional[List[str]] = None,
@@ -1070,19 +1102,24 @@ def list_governance_review_items(
1070
1102
  plain_english = str(item.get("plain_english") or "").strip()
1071
1103
  if plain_english:
1072
1104
  summary = plain_english
1105
+ signal = float(item.get("signal") or 0.0)
1106
+ reason = str(item.get("reason") or "")
1107
+ priority = int(item.get("priority") or 0)
1073
1108
  review_items.append({
1074
1109
  "review_id": f"{kind}:{reference}->{target_reference}",
1075
1110
  "kind": kind,
1076
1111
  "kind_label": _REVIEW_KIND_METADATA.get(kind, {}).get("label") or kind.replace("_", " "),
1077
1112
  "relationship": relationship,
1078
- "priority": int(item.get("priority") or 0),
1113
+ "priority": priority,
1114
+ "priority_label": _review_priority_label(priority),
1079
1115
  "timestamp": item.get("timestamp"),
1080
1116
  "bucket": item.get("bucket"),
1081
- "signal": float(item.get("signal") or 0.0),
1082
- "reason": item.get("reason"),
1117
+ "signal": signal,
1118
+ "reason": reason,
1083
1119
  "reference": reference,
1084
1120
  "target_reference": target_reference,
1085
1121
  "summary": summary,
1122
+ "explanation": _review_explanation(kind, signal=signal, reason=reason, source=source, target=target),
1086
1123
  "actions": _review_actions(kind, relationship),
1087
1124
  "source": source,
1088
1125
  "target": target,
@@ -1234,6 +1271,27 @@ def rollback_governance_decision(
1234
1271
  return None
1235
1272
 
1236
1273
 
1274
+ def _governance_queue_explanation(kind: str, *, target_reference: str | None, priority: int, reason: str | None = None, signal: float | None = None) -> Dict[str, Any]:
1275
+ reason_text = str(reason or "").strip() or "no explicit rationale captured"
1276
+ if kind == "duplicate_candidate":
1277
+ short = f"Possible duplicate relationship queued at priority {priority}."
1278
+ elif kind == "contradiction_candidate":
1279
+ short = f"Possible contradiction queued at priority {priority}."
1280
+ elif kind == "supersession_recommendation":
1281
+ short = f"Possible supersession queued at priority {priority}."
1282
+ else:
1283
+ short = f"Governance queue item queued at priority {priority}."
1284
+ payload = {
1285
+ "short": short,
1286
+ "reason": reason_text,
1287
+ "target_reference": target_reference,
1288
+ "priority": priority,
1289
+ }
1290
+ if signal is not None:
1291
+ payload["signal"] = float(signal)
1292
+ return payload
1293
+
1294
+
1237
1295
  def governance_queue(*, categories: Optional[List[str]] = None, limit: int = 100, scan_limit: int = 3000) -> List[Dict[str, Any]]:
1238
1296
  allowed = set(store.MEMORY_TABLES)
1239
1297
  tables = [table for table in (categories or list(allowed)) if table in allowed]
@@ -1260,36 +1318,62 @@ def governance_queue(*, categories: Optional[List[str]] = None, limit: int = 100
1260
1318
  supersession_recommendation = prov.get("supersession_recommendation") or {}
1261
1319
 
1262
1320
  for target in duplicate_candidates:
1321
+ priority = 40
1263
1322
  items.append({
1264
1323
  "reference": reference,
1265
1324
  "target_reference": target,
1266
1325
  "kind": "duplicate_candidate",
1267
- "priority": 40,
1326
+ "priority": priority,
1327
+ "priority_label": _review_priority_label(priority),
1268
1328
  "timestamp": timestamp,
1269
1329
  "bucket": table,
1270
1330
  "content": content,
1331
+ "explanation": _governance_queue_explanation(
1332
+ "duplicate_candidate",
1333
+ target_reference=target,
1334
+ priority=priority,
1335
+ ),
1271
1336
  })
1272
1337
  for target in contradiction_candidates:
1338
+ priority = 70
1273
1339
  items.append({
1274
1340
  "reference": reference,
1275
1341
  "target_reference": target,
1276
1342
  "kind": "contradiction_candidate",
1277
- "priority": 70,
1343
+ "priority": priority,
1344
+ "priority_label": _review_priority_label(priority),
1278
1345
  "timestamp": timestamp,
1279
1346
  "bucket": table,
1280
1347
  "content": content,
1348
+ "explanation": _governance_queue_explanation(
1349
+ "contradiction_candidate",
1350
+ target_reference=target,
1351
+ priority=priority,
1352
+ ),
1281
1353
  })
1282
1354
  if isinstance(supersession_recommendation, dict) and supersession_recommendation.get("recommended"):
1355
+ priority = 90
1356
+ signal = float(supersession_recommendation.get("signal") or 0.0)
1357
+ reason = supersession_recommendation.get("reason")
1358
+ target_reference = supersession_recommendation.get("target_reference")
1283
1359
  items.append({
1284
1360
  "reference": reference,
1285
- "target_reference": supersession_recommendation.get("target_reference"),
1361
+ "target_reference": target_reference,
1286
1362
  "kind": "supersession_recommendation",
1287
- "priority": 90,
1363
+ "priority": priority,
1364
+ "priority_label": _review_priority_label(priority),
1288
1365
  "timestamp": timestamp,
1289
1366
  "bucket": table,
1290
- "signal": float(supersession_recommendation.get("signal") or 0.0),
1291
- "reason": supersession_recommendation.get("reason"),
1367
+ "signal": signal,
1368
+ "reason": reason,
1292
1369
  "content": content,
1370
+ "explanation": _governance_queue_explanation(
1371
+ "supersession_recommendation",
1372
+ target_reference=target_reference,
1373
+ priority=priority,
1374
+ reason=reason,
1375
+ signal=signal,
1376
+ ),
1293
1377
  })
1294
1378
  items.sort(key=lambda item: (int(item.get("priority") or 0), str(item.get("timestamp") or "")), reverse=True)
1295
1379
  return items[:limit]
@@ -9,6 +9,7 @@ from ocmemog.runtime.providers import provider_execute
9
9
 
10
10
  LOGFILE = state_store.report_log_path()
11
11
  _MODEL_CACHE: dict[str, Any] = {}
12
+ _LAST_EMBEDDING_DIAGNOSTICS: dict[str, Any] = {}
12
13
 
13
14
 
14
15
  def _local_embedding(text: str, local_model: str) -> List[float] | None:
@@ -53,11 +54,16 @@ def _provider_embedding(text: str, model_name: str) -> tuple[List[float] | None,
53
54
  return None, meta
54
55
 
55
56
 
57
def get_last_embedding_diagnostics() -> dict[str, Any]:
    """Return a shallow copy of the most recent embedding diagnostics snapshot.

    Copying prevents callers from mutating the module-level state recorded by
    generate_embedding.
    """
    return {**_LAST_EMBEDDING_DIAGNOSTICS}
59
+
60
+
56
61
  def generate_embedding(
57
62
  text: str,
58
63
  *,
59
64
  skip_provider: bool = False,
60
65
  ) -> List[float] | None:
66
+ global _LAST_EMBEDDING_DIAGNOSTICS
61
67
  emit_event(LOGFILE, "brain_embedding_start", status="ok")
62
68
  if not isinstance(text, str) or not text.strip():
63
69
  emit_event(LOGFILE, "brain_embedding_failed", status="error", reason="empty_text")
@@ -72,10 +78,22 @@ def generate_embedding(
72
78
  or getattr(config, "OCMEMOG_EMBED_MODEL_PROVIDER", "")
73
79
  or getattr(config, "BRAIN_EMBED_MODEL_PROVIDER", "")
74
80
  )
81
+ _LAST_EMBEDDING_DIAGNOSTICS = {
82
+ "provider_configured": bool(provider_model),
83
+ "provider_attempted": False,
84
+ "provider_skipped": bool(provider_model and skip_provider),
85
+ "provider_succeeded": False,
86
+ "local_model": local_model or "simple",
87
+ "local_used": False,
88
+ "local_mode": "local_simple" if (local_model or "simple") in {"simple", "hash"} else "local_model",
89
+ "path_used": None,
90
+ "embedding_generated": False,
91
+ }
75
92
  embedding: List[float] | None = None
76
93
  provider_meta: dict[str, str] = {}
77
94
 
78
95
  if provider_model and not skip_provider:
96
+ _LAST_EMBEDDING_DIAGNOSTICS["provider_attempted"] = True
79
97
  try:
80
98
  embedding, provider_meta = _provider_embedding(text, provider_model)
81
99
  except TimeoutError as exc:
@@ -113,6 +131,9 @@ def generate_embedding(
113
131
  fallback="local" if local_model else "disabled",
114
132
  )
115
133
  elif embedding:
134
+ _LAST_EMBEDDING_DIAGNOSTICS["provider_succeeded"] = True
135
+ _LAST_EMBEDDING_DIAGNOSTICS["path_used"] = "provider"
136
+ _LAST_EMBEDDING_DIAGNOSTICS["embedding_generated"] = True
116
137
  emit_event(
117
138
  LOGFILE,
118
139
  "brain_embedding_complete",
@@ -144,6 +165,9 @@ def generate_embedding(
144
165
  embedding = _local_embedding(text, local_model)
145
166
  if embedding:
146
167
  provider = "local_simple" if local_model in {"simple", "hash"} else "local_model"
168
+ _LAST_EMBEDDING_DIAGNOSTICS["local_used"] = True
169
+ _LAST_EMBEDDING_DIAGNOSTICS["path_used"] = provider
170
+ _LAST_EMBEDDING_DIAGNOSTICS["embedding_generated"] = True
147
171
  emit_event(LOGFILE, "brain_embedding_complete", status="ok", provider=provider)
148
172
  emit_event(LOGFILE, "brain_embedding_generated", status="ok", provider=provider, dimensions=len(embedding))
149
173
  return embedding
@@ -60,13 +60,130 @@ def _destination_table(summary: str) -> str:
60
60
  return "knowledge"
61
61
 
62
62
 
63
+ def _normalized_text(text: str) -> str:
64
+ return " ".join((text or "").strip().lower().split())
65
+
66
+
67
def _is_redundant_generic_candidate(summary_text: str) -> bool:
    """Return True when *summary_text* duplicates recent generic knowledge.

    Compares the whitespace/case-normalized summary against the 200 most
    recent rows in the ``knowledge`` table; an empty summary is never
    considered redundant.
    """
    wanted = _normalized_text(summary_text)
    if not wanted:
        return False
    conn = store.connect()
    try:
        rows = conn.execute(
            "SELECT content FROM knowledge ORDER BY id DESC LIMIT 200"
        ).fetchall()
    finally:
        conn.close()
    # wanted is non-empty, so an equality match implies a non-empty row too.
    return any(_normalized_text(row[0] if row else "") == wanted for row in rows)
83
+
84
+
85
def _should_reject_as_cruft(*, confidence: float, threshold: float, destination: str, summary_text: str) -> bool:
    """Return True when a below-threshold generic candidate should be dropped.

    Only candidates bound for the generic ``knowledge`` bucket that missed the
    promotion threshold and carry non-empty (normalized) text qualify.
    """
    generic_miss = destination == "knowledge" and confidence < threshold
    return generic_miss and bool(_normalized_text(summary_text))
89
+
90
+
91
+ def _is_ambiguous_specific_candidate(*, confidence: float, threshold: float, destination: str) -> bool:
92
+ if destination == "knowledge":
93
+ return False
94
+ margin = confidence - threshold
95
+ return margin < 0 and margin >= -0.2
96
+
97
+
98
+ def _quality_summary(*, decision: str, confidence: float, threshold: float, destination: str, redundant_generic: bool = False, ambiguous_specific: bool = False) -> Dict[str, Any]:
99
+ margin = round(confidence - threshold, 3)
100
+ if decision == "promote":
101
+ quality = "high" if margin >= 0.2 else "medium"
102
+ keep_recommendation = "keep"
103
+ noise_risk = "low"
104
+ else:
105
+ if destination == "knowledge":
106
+ quality = "low"
107
+ keep_recommendation = "drop"
108
+ noise_risk = "high"
109
+ elif ambiguous_specific:
110
+ quality = "medium"
111
+ keep_recommendation = "review"
112
+ noise_risk = "medium"
113
+ else:
114
+ quality = "medium"
115
+ keep_recommendation = "review"
116
+ noise_risk = "medium"
117
+ return {
118
+ "quality": quality,
119
+ "keep_recommendation": keep_recommendation,
120
+ "noise_risk": noise_risk,
121
+ "margin": margin,
122
+ "destination_specificity": "generic" if destination == "knowledge" else "specific",
123
+ "redundant_generic": bool(redundant_generic),
124
+ "ambiguous_specific": bool(ambiguous_specific),
125
+ }
126
+
127
+
128
+ def _verification_summary(*, decision: str, confidence: float, threshold: float, destination: str, redundant_generic: bool = False, ambiguous_specific: bool = False) -> Dict[str, Any]:
129
+ margin = round(confidence - threshold, 3)
130
+ if decision == "promote":
131
+ status = "verified"
132
+ reason = "meets_threshold"
133
+ else:
134
+ status = "needs_review"
135
+ if destination == "knowledge" and redundant_generic:
136
+ reason = "rejected_as_redundant_generic_cruft"
137
+ elif destination == "knowledge":
138
+ reason = "rejected_as_generic_cruft"
139
+ elif ambiguous_specific:
140
+ reason = "rejected_as_ambiguous_specific_memory"
141
+ else:
142
+ reason = "below_threshold"
143
+ return {
144
+ "status": status,
145
+ "reason": reason,
146
+ "confidence": round(confidence, 3),
147
+ "threshold": round(threshold, 3),
148
+ "margin": margin,
149
+ }
150
+
151
+
152
+ def _promotion_explanation(*, decision: str, destination: str, confidence: float, threshold: float, summary: str, redundant_generic: bool = False, ambiguous_specific: bool = False) -> Dict[str, Any]:
153
+ if decision == "promote":
154
+ short = f"Promoted to {destination} because confidence {confidence:.2f} met threshold {threshold:.2f}."
155
+ reason = "confidence_threshold"
156
+ else:
157
+ if destination == "knowledge" and redundant_generic:
158
+ short = f"Rejected as redundant memory cruft because confidence {confidence:.2f} was below threshold {threshold:.2f} and the summary closely matched existing generic knowledge."
159
+ reason = "rejected_as_redundant_generic_cruft"
160
+ elif destination == "knowledge":
161
+ short = f"Rejected as likely memory cruft because confidence {confidence:.2f} was below threshold {threshold:.2f} and the summary did not strongly fit a more specific bucket."
162
+ reason = "rejected_as_generic_cruft"
163
+ elif ambiguous_specific:
164
+ short = f"Rejected as an ambiguous specific memory because confidence {confidence:.2f} was below threshold {threshold:.2f} and the summary only weakly fit destination {destination}."
165
+ reason = "rejected_as_ambiguous_specific_memory"
166
+ else:
167
+ short = f"Rejected because confidence {confidence:.2f} was below threshold {threshold:.2f} for destination {destination}."
168
+ reason = "below_threshold"
169
+ return {
170
+ "short": short,
171
+ "reason": reason,
172
+ "destination": destination,
173
+ "confidence": round(confidence, 3),
174
+ "threshold": round(threshold, 3),
175
+ "summary_preview": summary[:160],
176
+ }
177
+
178
+
63
179
  def promote_candidate(candidate: Dict[str, Any]) -> Dict[str, Any]:
64
180
  from ocmemog.runtime.memory import api, reinforcement, vector_index
65
181
 
66
182
  emit_event(LOGFILE, "brain_memory_promote_start", status="ok")
67
183
  confidence = float(candidate.get("confidence_score", 0.0))
68
- decision = "promote" if _should_promote(confidence) else "reject"
184
+ threshold = float(config.OCMEMOG_PROMOTION_THRESHOLD)
69
185
  candidate_id = str(candidate.get("candidate_id") or "")
186
+ summary_text = str(candidate.get("distilled_summary", "") or "")
70
187
 
71
188
  candidate_metadata = provenance.normalize_metadata(candidate.get("metadata", {}), source="promote")
72
189
  candidate_metadata["candidate_id"] = candidate_id
@@ -75,11 +192,37 @@ def promote_candidate(candidate: Dict[str, Any]) -> Dict[str, Any]:
75
192
 
76
193
  conn = store.connect()
77
194
  promotion_id = None
78
- destination = _destination_table(str(candidate.get("distilled_summary", "")))
195
+ destination = _destination_table(summary_text)
196
+ redundant_generic = False
197
+ should_promote = _should_promote(confidence, threshold)
198
+ ambiguous_specific = _is_ambiguous_specific_candidate(
199
+ confidence=confidence,
200
+ threshold=threshold,
201
+ destination=destination,
202
+ )
203
+ if not should_promote and destination == "knowledge":
204
+ redundant_generic = _is_redundant_generic_candidate(summary_text)
205
+ reject_as_cruft = _should_reject_as_cruft(
206
+ confidence=confidence,
207
+ threshold=threshold,
208
+ destination=destination,
209
+ summary_text=summary_text,
210
+ )
211
+ decision = "promote" if should_promote and not reject_as_cruft else "reject"
212
+ decision_reason = "confidence_threshold"
213
+ if decision == "reject":
214
+ if destination == "knowledge" and redundant_generic:
215
+ decision_reason = "rejected_as_redundant_generic_cruft"
216
+ elif destination == "knowledge":
217
+ decision_reason = "rejected_as_generic_cruft"
218
+ elif ambiguous_specific:
219
+ decision_reason = "rejected_as_ambiguous_specific_memory"
220
+ else:
221
+ decision_reason = "below_threshold"
79
222
  if decision == "promote":
80
223
  row = conn.execute(
81
224
  "SELECT id FROM promotions WHERE source=? AND content=?",
82
- (str(candidate.get("source_event_id")), candidate.get("distilled_summary", "")),
225
+ (str(candidate.get("source_event_id")), summary_text),
83
226
  ).fetchone()
84
227
  if not row:
85
228
  cur = conn.execute(
@@ -94,9 +237,9 @@ def promote_candidate(candidate: Dict[str, Any]) -> Dict[str, Any]:
94
237
  str(candidate.get("source_event_id")),
95
238
  confidence,
96
239
  "promoted",
97
- "confidence_threshold",
240
+ decision_reason,
98
241
  json.dumps(candidate_metadata, ensure_ascii=False),
99
- candidate.get("distilled_summary", ""),
242
+ summary_text,
100
243
  store.SCHEMA_VERSION,
101
244
  ),
102
245
  )
@@ -106,7 +249,7 @@ def promote_candidate(candidate: Dict[str, Any]) -> Dict[str, Any]:
106
249
  str(candidate.get("source_event_id")),
107
250
  confidence,
108
251
  json.dumps(candidate_metadata, ensure_ascii=False),
109
- candidate.get("distilled_summary", ""),
252
+ summary_text,
110
253
  store.SCHEMA_VERSION,
111
254
  ),
112
255
  )
@@ -142,9 +285,9 @@ def promote_candidate(candidate: Dict[str, Any]) -> Dict[str, Any]:
142
285
  str(candidate.get("source_event_id")),
143
286
  confidence,
144
287
  "rejected",
145
- "below_threshold",
288
+ decision_reason,
146
289
  json.dumps(candidate_metadata, ensure_ascii=False),
147
- candidate.get("distilled_summary", ""),
290
+ summary_text,
148
291
  store.SCHEMA_VERSION,
149
292
  ),
150
293
  )
@@ -173,7 +316,7 @@ def promote_candidate(candidate: Dict[str, Any]) -> Dict[str, Any]:
173
316
  )
174
317
  emit_event(LOGFILE, "brain_memory_reinforcement_created", status="ok")
175
318
  if memory_id:
176
- vector_index.insert_memory(memory_id, candidate.get("distilled_summary", ""), confidence, source_type=destination)
319
+ vector_index.insert_memory(memory_id, summary_text, confidence, source_type=destination)
177
320
  try:
178
321
  api._auto_attach_governance_candidates(promoted_reference)
179
322
  except Exception as exc:
@@ -185,7 +328,37 @@ def promote_candidate(candidate: Dict[str, Any]) -> Dict[str, Any]:
185
328
  reference=promoted_reference,
186
329
  )
187
330
 
188
- return {"decision": decision, "confidence": confidence, "promotion_id": promotion_id, "destination": destination}
331
+ return {
332
+ "decision": decision,
333
+ "confidence": confidence,
334
+ "promotion_id": promotion_id,
335
+ "destination": destination,
336
+ "quality_summary": _quality_summary(
337
+ decision=decision,
338
+ confidence=confidence,
339
+ threshold=threshold,
340
+ destination=destination,
341
+ redundant_generic=redundant_generic,
342
+ ambiguous_specific=ambiguous_specific,
343
+ ),
344
+ "verification_summary": _verification_summary(
345
+ decision=decision,
346
+ confidence=confidence,
347
+ threshold=threshold,
348
+ destination=destination,
349
+ redundant_generic=redundant_generic,
350
+ ambiguous_specific=ambiguous_specific,
351
+ ),
352
+ "explanation": _promotion_explanation(
353
+ decision=decision,
354
+ destination=destination,
355
+ confidence=confidence,
356
+ threshold=threshold,
357
+ summary=str(candidate.get("distilled_summary", "") or ""),
358
+ redundant_generic=redundant_generic,
359
+ ambiguous_specific=ambiguous_specific,
360
+ ),
361
+ }
189
362
 
190
363
 
191
364
  def promote_candidate_by_id(candidate_id: str) -> Dict[str, Any]: