claude-memory-agent 2.0.0 → 2.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- package/README.md +206 -200
- package/agent_card.py +186 -0
- package/bin/cli.js +317 -181
- package/bin/postinstall.js +270 -216
- package/dashboard.html +4232 -2689
- package/hooks/__pycache__/grounding-hook.cpython-312.pyc +0 -0
- package/hooks/__pycache__/session_end.cpython-312.pyc +0 -0
- package/hooks/grounding-hook.py +422 -348
- package/hooks/session_end.py +293 -192
- package/hooks/session_start.py +227 -227
- package/install.py +919 -887
- package/main.py +4496 -2859
- package/package.json +47 -55
- package/services/__init__.py +50 -50
- package/services/__pycache__/__init__.cpython-312.pyc +0 -0
- package/services/__pycache__/curator.cpython-312.pyc +0 -0
- package/services/__pycache__/database.cpython-312.pyc +0 -0
- package/services/curator.py +1606 -0
- package/services/database.py +3637 -2485
- package/skills/__init__.py +21 -1
- package/skills/__pycache__/__init__.cpython-312.pyc +0 -0
- package/skills/__pycache__/confidence_tracker.cpython-312.pyc +0 -0
- package/skills/__pycache__/context.cpython-312.pyc +0 -0
- package/skills/__pycache__/curator.cpython-312.pyc +0 -0
- package/skills/__pycache__/search.cpython-312.pyc +0 -0
- package/skills/__pycache__/session_review.cpython-312.pyc +0 -0
- package/skills/__pycache__/store.cpython-312.pyc +0 -0
- package/skills/confidence_tracker.py +441 -0
- package/skills/context.py +675 -0
- package/skills/curator.py +348 -0
- package/skills/search.py +369 -213
- package/skills/session_review.py +418 -0
- package/skills/store.py +377 -179
- package/update_system.py +829 -817
package/skills/search.py
CHANGED
|
@@ -1,213 +1,369 @@
|
|
|
1
|
-
"""Semantic search skill with context filtering and fallback support."""
|
|
2
|
-
|
|
3
|
-
from
|
|
4
|
-
from services.
|
|
5
|
-
|
|
6
|
-
|
|
7
|
-
|
|
8
|
-
|
|
9
|
-
|
|
10
|
-
|
|
11
|
-
|
|
12
|
-
|
|
13
|
-
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
|
|
18
|
-
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
|
|
22
|
-
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
|
|
27
|
-
|
|
28
|
-
|
|
29
|
-
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
|
|
34
|
-
|
|
35
|
-
|
|
36
|
-
|
|
37
|
-
|
|
38
|
-
|
|
39
|
-
|
|
40
|
-
|
|
41
|
-
|
|
42
|
-
|
|
43
|
-
|
|
44
|
-
|
|
45
|
-
|
|
46
|
-
|
|
47
|
-
|
|
48
|
-
|
|
49
|
-
|
|
50
|
-
|
|
51
|
-
|
|
52
|
-
|
|
53
|
-
|
|
54
|
-
|
|
55
|
-
|
|
56
|
-
|
|
57
|
-
|
|
58
|
-
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
|
|
62
|
-
|
|
63
|
-
|
|
64
|
-
|
|
65
|
-
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
|
|
71
|
-
|
|
72
|
-
|
|
73
|
-
|
|
74
|
-
|
|
75
|
-
|
|
76
|
-
|
|
77
|
-
|
|
78
|
-
|
|
79
|
-
"
|
|
80
|
-
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
|
|
84
|
-
|
|
85
|
-
|
|
86
|
-
|
|
87
|
-
|
|
88
|
-
|
|
89
|
-
|
|
90
|
-
|
|
91
|
-
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
|
|
95
|
-
|
|
96
|
-
|
|
97
|
-
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
|
|
102
|
-
|
|
103
|
-
|
|
104
|
-
|
|
105
|
-
|
|
106
|
-
|
|
107
|
-
|
|
108
|
-
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
|
|
114
|
-
|
|
115
|
-
|
|
116
|
-
|
|
117
|
-
|
|
118
|
-
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
|
|
122
|
-
|
|
123
|
-
|
|
124
|
-
|
|
125
|
-
|
|
126
|
-
|
|
127
|
-
|
|
128
|
-
|
|
129
|
-
|
|
130
|
-
|
|
131
|
-
|
|
132
|
-
|
|
133
|
-
|
|
134
|
-
|
|
135
|
-
|
|
136
|
-
|
|
137
|
-
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
|
|
141
|
-
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
|
|
145
|
-
|
|
146
|
-
|
|
147
|
-
|
|
148
|
-
|
|
149
|
-
|
|
150
|
-
|
|
151
|
-
|
|
152
|
-
|
|
153
|
-
|
|
154
|
-
|
|
155
|
-
|
|
156
|
-
|
|
157
|
-
|
|
158
|
-
|
|
159
|
-
|
|
160
|
-
|
|
161
|
-
|
|
162
|
-
|
|
163
|
-
|
|
164
|
-
|
|
165
|
-
|
|
166
|
-
|
|
167
|
-
|
|
168
|
-
|
|
169
|
-
|
|
170
|
-
|
|
171
|
-
|
|
172
|
-
|
|
173
|
-
|
|
174
|
-
|
|
175
|
-
|
|
176
|
-
|
|
177
|
-
|
|
178
|
-
|
|
179
|
-
|
|
180
|
-
|
|
181
|
-
|
|
182
|
-
|
|
183
|
-
|
|
184
|
-
|
|
185
|
-
|
|
186
|
-
|
|
187
|
-
|
|
188
|
-
|
|
189
|
-
|
|
190
|
-
|
|
191
|
-
|
|
192
|
-
|
|
193
|
-
|
|
194
|
-
|
|
195
|
-
|
|
196
|
-
|
|
197
|
-
|
|
198
|
-
|
|
199
|
-
|
|
200
|
-
|
|
201
|
-
|
|
202
|
-
|
|
203
|
-
|
|
204
|
-
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
|
|
208
|
-
|
|
209
|
-
|
|
210
|
-
|
|
211
|
-
|
|
212
|
-
|
|
213
|
-
|
|
1
|
+
"""Semantic search skill with context filtering and fallback support."""
|
|
2
|
+
import logging
|
|
3
|
+
from typing import Dict, Any, Optional, List
|
|
4
|
+
from services.database import DatabaseService
|
|
5
|
+
from services.embeddings import EmbeddingService
|
|
6
|
+
|
|
7
|
+
logger = logging.getLogger(__name__)
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
async def _enrich_with_graph_context(db: DatabaseService, results: list) -> list:
    """
    Attach relationship context from the knowledge graph to search results.

    For each result (by type) this adds, when available:
      - errors: 'known_fixes' (memories linked via an incoming 'fixes' edge)
      - decisions: 'rationale' (incoming 'supports') and 'consequences'
        (outgoing 'caused_by')
      - all types: 'contradictions' (critical for anti-hallucination)
      - error/decision/code: 'causal_chain' (up to depth 3)

    Every graph lookup is best-effort: failures are logged at debug level
    and the result is returned without that field.

    Args:
        db: Database service instance
        results: List of search results to enrich

    Returns:
        New list of result dicts with graph-context fields added.
    """
    enriched: list = []

    for item in results:
        memory_id = item.get('id')
        # Results without an id cannot be looked up in the graph; pass through.
        if not memory_id:
            enriched.append(item)
            continue

        # Work on a shallow copy so the caller's dicts stay untouched.
        out = dict(item)
        mem_type = item.get('type', '')

        if mem_type == 'error':
            # Errors: surface memories that are known to fix them.
            try:
                fixes = await db.get_related_memories(memory_id, 'fixes', direction='incoming', depth=1)
                if fixes:
                    out['known_fixes'] = [
                        {'id': f['id'], 'content': f['content'][:200], 'outcome': f.get('outcome')}
                        for f in fixes
                    ]
            except Exception as e:
                logger.debug(f"Failed to get fixes for memory {memory_id}: {e}")

        if mem_type == 'decision':
            # Decisions: what supports them ...
            try:
                supports = await db.get_related_memories(memory_id, 'supports', direction='incoming', depth=1)
                if supports:
                    out['rationale'] = [
                        {'id': s['id'], 'content': s['content'][:200]}
                        for s in supports
                    ]
            except Exception as e:
                logger.debug(f"Failed to get supports for memory {memory_id}: {e}")

            # ... and what they caused.
            try:
                caused = await db.get_related_memories(memory_id, 'caused_by', direction='outgoing', depth=1)
                if caused:
                    out['consequences'] = [
                        {'id': c['id'], 'content': c['content'][:200]}
                        for c in caused
                    ]
            except Exception as e:
                logger.debug(f"Failed to get consequences for memory {memory_id}: {e}")

        # Contradictions apply to every memory type.
        try:
            contradictions = await db.find_contradictions(memory_id)
            if contradictions:
                out['contradictions'] = [
                    {'id': c['id'], 'content': c['content'][:200]}
                    for c in contradictions
                ]
        except Exception as e:
            logger.debug(f"Failed to get contradictions for memory {memory_id}: {e}")

        # Causal chains for the types where causality is meaningful.
        if mem_type in ['error', 'decision', 'code']:
            try:
                chain = await db.get_causal_chain(memory_id, max_depth=3)
                if chain and (chain.get('causes') or chain.get('fixes') or chain.get('root_causes')):
                    out['causal_chain'] = chain
            except Exception as e:
                logger.debug(f"Failed to get causal chain for memory {memory_id}: {e}")

        enriched.append(out)

    return enriched
|
|
93
|
+
|
|
94
|
+
|
|
95
|
+
async def semantic_search(
    db: DatabaseService,
    embeddings: EmbeddingService,
    query: str,
    limit: int = 10,
    memory_type: Optional[str] = None,
    session_id: Optional[str] = None,
    project_path: Optional[str] = None,
    agent_type: Optional[str] = None,
    success_only: bool = False,
    threshold: float = 0.5,
    # Outcome spectrum filters
    include_failed: bool = False,
    include_superseded: bool = False,
    include_unreliable: bool = False,
    outcome_status: Optional[str] = None,
    # Context-aware search
    current_context: Optional[Dict[str, Any]] = None,
    auto_detect_context: bool = True,
    # Graph enrichment
    include_graph: bool = True
) -> Dict[str, Any]:
    """
    Search memories by semantic similarity, with outcome- and context-aware
    ranking and automatic keyword-search fallback when embeddings are
    unavailable (e.g. Ollama is down).

    Outcome handling:
    - 'success' memories are boosted, 'partial'/'pending' are neutral.
    - 'failed' and 'superseded' memories are excluded unless the matching
      include_* flag is set; unreliable memories (failure_count >= 3)
      likewise require include_unreliable=True.

    Context handling:
    - When auto_detect_context is True and project_path is given (and no
      current_context was passed), the project context is auto-detected and
      used to boost/penalize memories by where they previously worked.

    Args:
        db: Database service instance
        embeddings: Embedding service instance
        query: Search query text
        limit: Maximum number of results
        memory_type: Filter by type (session, decision, code, chunk, error)
        session_id: Filter by session ID
        project_path: Filter by project
        agent_type: Filter by agent that created the memory
        success_only: Only return memories marked as successful (legacy)
        threshold: Minimum similarity threshold (0-1), semantic mode only
        include_failed: Include outcome_status='failed' memories
        include_superseded: Include outcome_status='superseded' memories
        include_unreliable: Include memories with failure_count >= 3
        outcome_status: Filter by a specific outcome status
        current_context: Context dict (project_type, tech_stack, file_patterns)
        auto_detect_context: Auto-detect context from project_path when unset
        include_graph: Enrich results with graph context (fixes, rationale,
            contradictions, causal chains)

    Returns:
        Dict with ranked results plus search metadata (search_method,
        degraded_mode, filters, context_aware, detected_context, threshold).
    """
    # Best-effort context auto-detection; an explicit current_context wins.
    detected_context = None
    if auto_detect_context and project_path and not current_context:
        try:
            from skills.context import detect_project_context
            detected_context = detect_project_context(project_path)
        except Exception:
            pass

    search_context = current_context or detected_context

    # May be None when the embedding backend is unavailable.
    query_embedding = await embeddings.generate_embedding(query)

    # Filters shared by both search backends.
    shared_filters = dict(
        limit=limit,
        memory_type=memory_type,
        session_id=session_id,
        project_path=project_path,
        agent_type=agent_type,
        success_only=success_only,
        include_failed=include_failed,
        include_superseded=include_superseded,
        include_unreliable=include_unreliable,
        outcome_status=outcome_status,
    )

    if query_embedding is not None:
        search_method = "semantic"
        results = await db.search_similar(
            embedding=query_embedding,
            threshold=threshold,
            current_context=search_context,
            **shared_filters,
        )
    else:
        # Degraded mode: fall back to keyword matching.
        search_method = "keyword"
        results = await db.keyword_search(query=query, **shared_filters)

    if include_graph:
        try:
            results = await _enrich_with_graph_context(db, results)
        except Exception as e:
            # Enrichment is optional; keep the unenriched results.
            logger.warning(f"Failed to enrich with graph context: {e}")

    return {
        "success": True,
        "query": query,
        "results": results,
        "count": len(results),
        "search_method": search_method,
        "degraded_mode": embeddings.is_degraded(),
        "filters": {
            "type": memory_type,
            "project": project_path,
            "agent": agent_type,
            "success_only": success_only,
            "include_failed": include_failed,
            "include_superseded": include_superseded,
            "include_unreliable": include_unreliable,
            "outcome_status": outcome_status,
            "include_graph": include_graph
        },
        "context_aware": search_context is not None,
        "detected_context": detected_context,
        "threshold": threshold if search_method == "semantic" else None
    }
|
|
242
|
+
|
|
243
|
+
|
|
244
|
+
async def search_patterns(
    db: DatabaseService,
    embeddings: EmbeddingService,
    query: str,
    limit: int = 5,
    problem_type: Optional[str] = None,
    threshold: float = 0.5
) -> Dict[str, Any]:
    """
    Search reusable solution patterns, falling back to keyword search when
    embeddings are unavailable.

    Args:
        db: Database service instance
        embeddings: Embedding service instance
        query: Problem description or search query
        limit: Maximum number of results
        problem_type: Filter by problem type
        threshold: Minimum similarity threshold (semantic mode only)

    Returns:
        Dict with matching patterns plus search metadata (search_method,
        degraded_mode, problem_type).
    """
    query_embedding = await embeddings.generate_embedding(query)

    if query_embedding is None:
        # Degraded mode: keyword search on the patterns table.
        search_method = "keyword"
        matches = await db.keyword_search_patterns(
            query=query,
            limit=limit,
            problem_type=problem_type,
        )
    else:
        search_method = "semantic"
        matches = await db.search_patterns(
            embedding=query_embedding,
            limit=limit,
            problem_type=problem_type,
            threshold=threshold,
        )

    return {
        "success": True,
        "query": query,
        "patterns": matches,
        "count": len(matches),
        "search_method": search_method,
        "degraded_mode": embeddings.is_degraded(),
        "problem_type": problem_type
    }
|
|
298
|
+
|
|
299
|
+
|
|
300
|
+
async def get_project_context(
    db: DatabaseService,
    embeddings: EmbeddingService,
    project_path: str,
    query: Optional[str] = None,
    limit: int = 10
) -> Dict[str, Any]:
    """
    Collect the relevant stored context for a project: project record,
    recent decisions, code patterns, and (optionally) memories relevant
    to a query. Falls back to keyword search when embeddings are
    unavailable.

    Args:
        db: Database service instance
        embeddings: Embedding service instance
        project_path: Path to the project
        query: Optional query to filter relevant memories
        limit: Max memories to return per category

    Returns:
        Dict with project info, decisions, code patterns, query-relevant
        memories (or None when no query), and search metadata.
    """
    project = await db.get_project(project_path)

    # Recent decisions and code patterns recorded for this project.
    decisions = await db.get_memories_by_type(
        memory_type="decision",
        project_path=project_path,
        limit=limit,
    )
    code_patterns = await db.get_memories_by_type(
        memory_type="code",
        project_path=project_path,
        limit=limit,
    )

    relevant: list = []
    search_method = None
    if query:
        query_embedding = await embeddings.generate_embedding(query)
        if query_embedding is None:
            # Degraded mode: keyword fallback.
            search_method = "keyword"
            relevant = await db.keyword_search(
                query=query,
                project_path=project_path,
                limit=limit,
            )
        else:
            search_method = "semantic"
            relevant = await db.search_similar(
                embedding=query_embedding,
                project_path=project_path,
                limit=limit,
                threshold=0.4,
            )

    return {
        "success": True,
        "project": project,
        "decisions": decisions,
        "code_patterns": code_patterns,
        "relevant_to_query": relevant if query else None,
        "search_method": search_method,
        "degraded_mode": embeddings.is_degraded()
    }
|