claude-memory-agent 3.0.3 → 3.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/dashboard.html +88 -38
- package/hooks/auto-detect-response.py +3 -6
- package/hooks/detect-correction.py +8 -7
- package/hooks/grounding-hook.py +19 -3
- package/hooks/log-tool-use.py +3 -6
- package/hooks/log-user-request.py +14 -6
- package/hooks/pre-tool-decision.py +3 -6
- package/hooks/session_end_hook.py +37 -0
- package/hooks/session_start.py +10 -0
- package/hooks/stop_hook.py +123 -0
- package/install.py +139 -44
- package/main.py +133 -13
- package/mcp_proxy.py +6 -0
- package/package.json +2 -2
- package/services/agent_registry.py +260 -12
- package/services/database.py +186 -0
- package/services/soul.py +467 -0
package/services/soul.py
ADDED
|
"""Soul Service — Synthesis engine for persistent personality and learning.

Provides 4 core functions:
1. generate_soul_brief — Session start: returns synthesized context string
2. capture_soul_fragment — Stop hook: lightweight regex extraction of high-signal content
3. run_soul_integration — Session end: merge fragments into persistent soul_state
4. enrich_with_soul — memory_ask: add soul context to search results

All functions are designed with strict time budgets:
- generate_soul_brief: < 1s (pure DB read)
- capture_soul_fragment: < 200ms (regex only, no LLM)
- run_soul_integration: < 5s (heuristics first, LLM only if 20+ fragments)
- enrich_with_soul: < 200ms (DB read only)
"""
# NOTE(review): run_soul_integration in this module is heuristics-only; no LLM
# code path is visible here — confirm the "LLM only if 20+ fragments" claim
# against the caller before relying on it.

import re
import json
import logging
from datetime import datetime
from typing import Dict, Any, List, Optional

# Module-level logger, per stdlib convention.
logger = logging.getLogger(__name__)

# ---------------------------------------------------------------------------
# Fragment extraction patterns (regex, no LLM)
# ---------------------------------------------------------------------------

# Maps each fragment type to the pre-compiled patterns that detect it.
# Each pattern captures the high-signal tail of the sentence in group 1;
# extract_fragments() falls back to the whole match when no group matched.
SOUL_PATTERNS: Dict[str, List[re.Pattern]] = {
    # Explicit technology / approach choices ("decided to use X", "using X because ...").
    "decision_made": [
        re.compile(
            r"(?:let's|we'll|going to|chose to|decided to)\s+(?:use|go with|implement|try)\s+(.+)",
            re.IGNORECASE,
        ),
        re.compile(
            r"(?:using|choosing|picked)\s+(\S+)\s+(?:because|since|for)",
            re.IGNORECASE,
        ),
    ],
    # User-stated preferences and standing instructions.
    "preference_expressed": [
        re.compile(
            r"(?:I prefer|you should always|always use|never use|don't use|I like to)\s+(.+)",
            re.IGNORECASE,
        ),
        re.compile(
            r"(?:remember to|make sure to|don't forget to)\s+(.+)",
            re.IGNORECASE,
        ),
    ],
    # Descriptions of a bug and its resolution.
    "error_resolved": [
        re.compile(
            r"(?:fixed|resolved|solved|the issue was|root cause)\s*:?\s*(.+)",
            re.IGNORECASE,
        ),
        re.compile(
            r"(?:the (?:fix|solution) (?:was|is))\s+(.+)",
            re.IGNORECASE,
        ),
    ],
    # References to reusing an earlier approach.
    "pattern_used": [
        re.compile(
            r"(?:same (?:approach|pattern|method) as)\s+(.+)",
            re.IGNORECASE,
        ),
        re.compile(
            r"(?:like we did (?:for|in|with))\s+(.+)",
            re.IGNORECASE,
        ),
    ],
    # User pushback / corrections of the assistant's behavior.
    "correction_received": [
        re.compile(
            r"(?:no,?\s+(?:actually|that's wrong|not like that)|(?:don't|stop)\s+(?:do|doing)\s+that)\s*[,:]?\s*(.+)",
            re.IGNORECASE,
        ),
    ],
}

# Max content length per fragment (characters); longer captures are truncated.
MAX_FRAGMENT_LENGTH = 300
+
class SoulService:
    """Central synthesis engine for persistent personality and learning."""

    def __init__(self, db):
        """
        Args:
            db: DatabaseService instance with soul table methods
                (get_soul_state, insert_soul_fragment,
                get_unintegrated_fragments, upsert_soul_state,
                mark_fragments_integrated).
        """
        # Injected persistence layer; every soul read/write goes through it.
        self.db = db
+
# ------------------------------------------------------------------
|
|
92
|
+
# 1. generate_soul_brief — Called at session start
|
|
93
|
+
# ------------------------------------------------------------------
|
|
94
|
+
|
|
95
|
+
async def generate_soul_brief(self, project_path: str) -> str:
    """Generate a soul brief for session context injection.

    Reads soul_state for this project and returns a 200-400 word brief.
    Pure DB read, no LLM — budget: < 1s.

    Args:
        project_path: Project whose soul state should be summarized.

    Returns:
        Formatted brief string, or empty string if no soul state exists.
    """
    state = await self.db.get_soul_state(project_path)
    if not state or not state.get("soul_brief"):
        # No soul state yet — return minimal placeholder
        return ""

    # Parse JSON-encoded columns defensively (DB stores them as strings).
    user_model = _safe_json_loads(state.get("user_model", "{}"), {})
    project_understanding = _safe_json_loads(state.get("project_understanding", "{}"), {})
    success_journal = _safe_json_loads(state.get("success_journal", "[]"), [])
    blind_spots = _safe_json_loads(state.get("blind_spots", "[]"), [])
    tool_preferences = _safe_json_loads(state.get("tool_preferences", "{}"), {})
    integration_count = state.get("integration_count", 0)

    # Build brief from template
    parts = []
    parts.append("[SOUL CONTEXT — Claude Memory]")

    # Project header. Normalize separators and strip trailing slashes so
    # "C:\proj\" or "/home/x/proj/" still yield "proj" (the previous
    # split-only form returned "" for trailing separators) and so the
    # derivation matches _build_brief_text.
    default_name = project_path.replace("\\", "/").rstrip("/").split("/")[-1]
    project_name = project_understanding.get("name", default_name)
    parts.append(f"Project: {project_name}")
    parts.append(f"Sessions integrated: {integration_count}")
    if state.get("last_integrated_at"):
        parts.append(f"Last integration: {state['last_integrated_at']}")

    # User model section — most recent preferences/dislikes/work style only,
    # to keep the brief inside its word budget.
    if user_model:
        parts.append("\nYOU KNOW THIS USER:")
        preferences = user_model.get("preferences", [])
        for pref in preferences[-5:]:  # Last 5 preferences
            parts.append(f"- {pref}")
        dislikes = user_model.get("dislikes", [])
        for dislike in dislikes[-3:]:
            parts.append(f"- Dislikes: {dislike}")
        work_style = user_model.get("work_style", [])
        for ws in work_style[-3:]:
            parts.append(f"- {ws}")

    # Project understanding: tech choices, architecture decisions, focus.
    if project_understanding:
        parts.append("\nPROJECT UNDERSTANDING:")
        tech = project_understanding.get("tech_choices", [])
        for t in tech[-4:]:
            parts.append(f"- {t}")
        arch_decisions = project_understanding.get("architecture_decisions", [])
        for ad in arch_decisions[-3:]:
            parts.append(f"- {ad}")
        focus = project_understanding.get("recent_focus", "")
        if focus:
            parts.append(f"- Recent focus: {focus}")

    # Success journal (recent wins)
    if success_journal:
        parts.append("\nRECENT LEARNINGS:")
        for entry in success_journal[-4:]:
            parts.append(f"- {entry}")

    # Blind spots — recurring mistakes to actively watch for.
    if blind_spots:
        parts.append("\nACTIVE BLIND SPOTS:")
        for bs in blind_spots[-3:]:
            parts.append(f"- {bs}")

    # Tool preferences (favored tools only).
    if tool_preferences:
        favored = tool_preferences.get("favored", [])
        if favored:
            parts.append(f"\nTOOL PREFERENCES: {', '.join(favored[-5:])}")

    return "\n".join(parts)
|
|
174
|
+
# ------------------------------------------------------------------
|
|
175
|
+
# 2. capture_soul_fragment — Called on every Stop hook
|
|
176
|
+
# ------------------------------------------------------------------
|
|
177
|
+
|
|
178
|
+
async def capture_soul_fragment(
    self, session_id: str, fragment_type: str,
    content: str, project_path: str = ""
) -> Optional[int]:
    """Capture a soul fragment from a response.

    Lightweight — just stores in soul_fragments staging table.
    Budget: < 200ms.

    Args:
        session_id: Current session ID
        fragment_type: One of: decision_made, error_resolved, preference_expressed,
                       pattern_used, correction_received
        content: The extracted fragment content
        project_path: Project path

    Returns:
        Fragment ID if stored, None on error.
    """
    # Clip oversized fragments before they hit the staging table.
    clipped = content
    if len(clipped) > MAX_FRAGMENT_LENGTH:
        clipped = clipped[:MAX_FRAGMENT_LENGTH] + "..."

    return await self.db.insert_soul_fragment(
        session_id=session_id,
        project_path=project_path,
        fragment_type=fragment_type,
        content=clipped.strip(),
    )
|
|
208
|
+
def extract_fragments(self, text: str) -> List[Dict[str, str]]:
    """Extract soul fragments from text using regex patterns.

    Pure regex, no LLM. Budget: < 100ms.

    Args:
        text: Text to scan for high-signal content.

    Returns:
        List of {"fragment_type": ..., "content": ...} dicts.
    """
    found: List[Dict[str, str]] = []
    seen: set = set()

    for ftype, patterns in SOUL_PATTERNS.items():
        for rx in patterns:
            for m in rx.finditer(text):
                # Prefer the capture group; fall back to the whole match.
                snippet = (m.group(1) if m.lastindex else m.group(0)).strip()

                # Keep only matches in the useful size band.
                if not (10 <= len(snippet) <= MAX_FRAGMENT_LENGTH):
                    continue

                # Deduplicate within this extraction (first 50 chars, folded).
                dedupe_key = snippet[:50].lower()
                if dedupe_key in seen:
                    continue
                seen.add(dedupe_key)

                found.append({
                    "fragment_type": ftype,
                    "content": snippet[:MAX_FRAGMENT_LENGTH],
                })

    return found
|
|
242
|
+
# ------------------------------------------------------------------
|
|
243
|
+
# 3. run_soul_integration — Called at session end
|
|
244
|
+
# ------------------------------------------------------------------
|
|
245
|
+
|
|
246
|
+
async def run_soul_integration(
    self, session_id: str, project_path: str
) -> Dict[str, Any]:
    """Integrate soul fragments from a session into persistent soul_state.

    Groups fragments by type, counts patterns, merges into soul_state
    with recency weighting. Uses heuristics (no LLM).
    Budget: < 5s.

    Args:
        session_id: Session that triggered integration; used as fallback
            when a fragment row carries no session_id of its own.
        project_path: Project whose fragments and soul_state are merged.

    Returns:
        Dict with integration stats: fragments integrated, per-type counts,
        the new integration number, and the DB success flag.
    """
    # Get all unintegrated fragments for this project (may span sessions).
    fragments = await self.db.get_unintegrated_fragments(project_path)
    if not fragments:
        return {"integrated": 0, "message": "No fragments to integrate"}

    # Load existing soul state (or start from an empty one).
    state = await self.db.get_soul_state(project_path) or {}
    user_model = _safe_json_loads(state.get("user_model", "{}"), {})
    project_understanding = _safe_json_loads(state.get("project_understanding", "{}"), {})
    success_journal = _safe_json_loads(state.get("success_journal", "[]"), [])
    blind_spots = _safe_json_loads(state.get("blind_spots", "[]"), [])
    tool_preferences = _safe_json_loads(state.get("tool_preferences", "{}"), {})
    integration_count = state.get("integration_count", 0) or 0

    # Group fragment contents by type for targeted merging.
    by_type: Dict[str, List[str]] = {}
    for frag in fragments:
        ft = frag["fragment_type"]
        by_type.setdefault(ft, []).append(frag["content"])

    # --- Merge decisions into project_understanding (keep last 20) ---
    decisions = by_type.get("decision_made", [])
    if decisions:
        arch_decisions = project_understanding.get("architecture_decisions", [])
        for d in decisions:
            arch_decisions.append(f"[Session {integration_count + 1}] {d}")
        project_understanding["architecture_decisions"] = arch_decisions[-20:]

    # --- Merge preferences into user_model (near-dupe filter, keep last 15) ---
    preferences = by_type.get("preference_expressed", [])
    if preferences:
        user_prefs = user_model.get("preferences", [])
        for p in preferences:
            # Avoid near-duplicates via word-overlap similarity.
            if not any(_is_similar(p, existing) for existing in user_prefs):
                user_prefs.append(p)
        user_model["preferences"] = user_prefs[-15:]

    # --- Merge error resolutions into success_journal (keep last 15) ---
    errors = by_type.get("error_resolved", [])
    if errors:
        for e in errors:
            success_journal.append(f"Fixed: {e}")
        success_journal = success_journal[-15:]

    # --- Merge patterns into success_journal (keep last 15) ---
    patterns = by_type.get("pattern_used", [])
    if patterns:
        for p in patterns:
            success_journal.append(f"Pattern: {p}")
        success_journal = success_journal[-15:]

    # --- Merge corrections into blind_spots (keep last 10) ---
    corrections = by_type.get("correction_received", [])
    if corrections:
        for c in corrections:
            blind_spots.append(f"[Session {integration_count + 1}] {c}")
        blind_spots = blind_spots[-10:]

    # Rebuild the compact brief from the merged state.
    brief = await self._build_brief_text(
        project_path, user_model, project_understanding,
        success_journal, blind_spots, tool_preferences,
        integration_count + 1
    )

    # Persist the updated soul state (JSON-encoded fields).
    now = datetime.now().isoformat()
    updates = {
        "soul_brief": brief,
        "user_model": json.dumps(user_model),
        "project_understanding": json.dumps(project_understanding),
        "success_journal": json.dumps(success_journal),
        "blind_spots": json.dumps(blind_spots),
        "tool_preferences": json.dumps(tool_preferences),
        "last_integrated_at": now,
        "integration_count": integration_count + 1,
    }

    success = await self.db.upsert_soul_state(project_path, updates)

    # Mark fragments as integrated. Fragments may belong to several
    # sessions; mark each distinct session once instead of issuing one
    # redundant DB call per fragment (the previous loop re-marked the
    # same session repeatedly).
    integrated_count = 0
    if success:
        session_ids = {frag.get("session_id", session_id) for frag in fragments}
        for sid in session_ids:
            await self.db.mark_fragments_integrated(sid)
        integrated_count = len(fragments)

    return {
        "integrated": integrated_count,
        "by_type": {k: len(v) for k, v in by_type.items()},
        "integration_number": integration_count + 1,
        "success": success,
    }
|
|
360
|
+
async def _build_brief_text(
    self, project_path: str, user_model: dict,
    project_understanding: dict, success_journal: list,
    blind_spots: list, tool_preferences: dict,
    integration_count: int
) -> str:
    """Build the compact one-line soul brief from state components.

    Joins a project header with up to four optional sections
    (user preferences, decisions, learnings, blind spots) using " | ".
    """
    # Normalize separators / trailing slashes, then take the last segment
    # as the fallback project name.
    fallback_name = project_path.replace("\\", "/").rstrip("/").split("/")[-1]
    name = project_understanding.get("name", fallback_name)

    sections = [f"Project: {name} | Sessions: {integration_count}"]

    prefs = user_model.get("preferences")
    if prefs:
        sections.append("User: " + "; ".join(prefs[-3:]))

    decisions = project_understanding.get("architecture_decisions")
    if decisions:
        sections.append("Decisions: " + "; ".join(decisions[-2:]))

    if success_journal:
        sections.append("Learnings: " + "; ".join(success_journal[-2:]))

    if blind_spots:
        sections.append("Watch out: " + "; ".join(blind_spots[-2:]))

    # tool_preferences is accepted for interface parity but not rendered here.
    return " | ".join(sections) if sections else ""
|
|
393
|
+
# ------------------------------------------------------------------
|
|
394
|
+
# 4. enrich_with_soul — Called in memory_ask
|
|
395
|
+
# ------------------------------------------------------------------
|
|
396
|
+
|
|
397
|
+
async def enrich_with_soul(
    self, search_results: Dict[str, Any], project_path: str
) -> Dict[str, Any]:
    """Add soul context to search results.

    Pure DB read, budget: < 200ms.

    Args:
        search_results: Existing search results dict from memory_ask
        project_path: Project to fetch soul for

    Returns:
        search_results dict with added 'soul_context' key.
    """
    # Guard clauses: nothing to enrich without a project or stored state.
    if not project_path:
        return search_results
    state = await self.db.get_soul_state(project_path)
    if not state:
        return search_results

    context: Dict[str, Any] = {}

    brief = state.get("soul_brief")
    if brief:
        context["brief"] = brief

    # Surface only the most recent items from each JSON-encoded field.
    prefs = _safe_json_loads(state.get("user_model", "{}"), {}).get("preferences")
    if prefs:
        context["user_preferences"] = prefs[-5:]

    spots = _safe_json_loads(state.get("blind_spots", "[]"), [])
    if spots:
        context["blind_spots"] = spots[-3:]

    journal = _safe_json_loads(state.get("success_journal", "[]"), [])
    if journal:
        context["recent_learnings"] = journal[-3:]

    context["integration_count"] = state.get("integration_count", 0)

    search_results["soul_context"] = context
    return search_results
|
|
442
|
+
|
|
443
|
+
# ---------------------------------------------------------------------------
|
|
444
|
+
# Helpers
|
|
445
|
+
# ---------------------------------------------------------------------------
|
|
446
|
+
|
|
447
|
+
def _safe_json_loads(value: Any, default: Any) -> Any:
    """Parse *value* as JSON, returning *default* on any failure.

    Values that are already dicts/lists pass through untouched;
    non-strings and empty strings yield the default.
    """
    if isinstance(value, (dict, list)):
        # Already deserialized — nothing to do.
        return value
    if not isinstance(value, str) or not value:
        return default
    try:
        return json.loads(value)
    except (json.JSONDecodeError, TypeError):
        return default
|
|
458
|
+
|
|
459
|
+
def _is_similar(a: str, b: str, threshold: float = 0.7) -> bool:
    """Word-overlap similarity: True when the shared-word ratio meets *threshold*.

    Ratio is |words(a) ∩ words(b)| / max(|words(a)|, |words(b)|),
    case-insensitive; empty inputs are never similar.
    """
    first = set(a.lower().split())
    second = set(b.lower().split())
    if not first or not second:
        return False
    ratio = len(first & second) / max(len(first), len(second))
    return ratio >= threshold