memorygraphMCP 0.11.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memorygraph/__init__.py +50 -0
- memorygraph/__main__.py +12 -0
- memorygraph/advanced_tools.py +509 -0
- memorygraph/analytics/__init__.py +46 -0
- memorygraph/analytics/advanced_queries.py +727 -0
- memorygraph/backends/__init__.py +21 -0
- memorygraph/backends/base.py +179 -0
- memorygraph/backends/cloud.py +75 -0
- memorygraph/backends/cloud_backend.py +858 -0
- memorygraph/backends/factory.py +577 -0
- memorygraph/backends/falkordb_backend.py +749 -0
- memorygraph/backends/falkordblite_backend.py +746 -0
- memorygraph/backends/ladybugdb_backend.py +242 -0
- memorygraph/backends/memgraph_backend.py +327 -0
- memorygraph/backends/neo4j_backend.py +298 -0
- memorygraph/backends/sqlite_fallback.py +463 -0
- memorygraph/backends/turso.py +448 -0
- memorygraph/cli.py +743 -0
- memorygraph/cloud_database.py +297 -0
- memorygraph/config.py +295 -0
- memorygraph/database.py +933 -0
- memorygraph/graph_analytics.py +631 -0
- memorygraph/integration/__init__.py +69 -0
- memorygraph/integration/context_capture.py +426 -0
- memorygraph/integration/project_analysis.py +583 -0
- memorygraph/integration/workflow_tracking.py +492 -0
- memorygraph/intelligence/__init__.py +59 -0
- memorygraph/intelligence/context_retrieval.py +447 -0
- memorygraph/intelligence/entity_extraction.py +386 -0
- memorygraph/intelligence/pattern_recognition.py +420 -0
- memorygraph/intelligence/temporal.py +374 -0
- memorygraph/migration/__init__.py +27 -0
- memorygraph/migration/manager.py +579 -0
- memorygraph/migration/models.py +142 -0
- memorygraph/migration/scripts/__init__.py +17 -0
- memorygraph/migration/scripts/bitemporal_migration.py +595 -0
- memorygraph/migration/scripts/multitenancy_migration.py +452 -0
- memorygraph/migration_tools_module.py +146 -0
- memorygraph/models.py +684 -0
- memorygraph/proactive/__init__.py +46 -0
- memorygraph/proactive/outcome_learning.py +444 -0
- memorygraph/proactive/predictive.py +410 -0
- memorygraph/proactive/session_briefing.py +399 -0
- memorygraph/relationships.py +668 -0
- memorygraph/server.py +883 -0
- memorygraph/sqlite_database.py +1876 -0
- memorygraph/tools/__init__.py +59 -0
- memorygraph/tools/activity_tools.py +262 -0
- memorygraph/tools/memory_tools.py +315 -0
- memorygraph/tools/migration_tools.py +181 -0
- memorygraph/tools/relationship_tools.py +147 -0
- memorygraph/tools/search_tools.py +406 -0
- memorygraph/tools/temporal_tools.py +339 -0
- memorygraph/utils/__init__.py +10 -0
- memorygraph/utils/context_extractor.py +429 -0
- memorygraph/utils/error_handling.py +151 -0
- memorygraph/utils/export_import.py +425 -0
- memorygraph/utils/graph_algorithms.py +200 -0
- memorygraph/utils/pagination.py +149 -0
- memorygraph/utils/project_detection.py +133 -0
- memorygraphmcp-0.11.7.dist-info/METADATA +970 -0
- memorygraphmcp-0.11.7.dist-info/RECORD +65 -0
- memorygraphmcp-0.11.7.dist-info/WHEEL +4 -0
- memorygraphmcp-0.11.7.dist-info/entry_points.txt +2 -0
- memorygraphmcp-0.11.7.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,46 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Proactive Features for Claude Code Memory Server.
|
|
3
|
+
|
|
4
|
+
This module provides AI-powered proactive features including:
|
|
5
|
+
- Session start intelligence and briefing
|
|
6
|
+
- Predictive suggestions based on current context
|
|
7
|
+
- Outcome learning and effectiveness tracking
|
|
8
|
+
- Advanced analytics queries
|
|
9
|
+
|
|
10
|
+
Phase 7 Implementation
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from .session_briefing import (
|
|
14
|
+
SessionBriefing,
|
|
15
|
+
generate_session_briefing,
|
|
16
|
+
get_session_briefing_resource,
|
|
17
|
+
)
|
|
18
|
+
from .predictive import (
|
|
19
|
+
Suggestion,
|
|
20
|
+
Warning,
|
|
21
|
+
predict_needs,
|
|
22
|
+
warn_potential_issues,
|
|
23
|
+
suggest_related_context,
|
|
24
|
+
)
|
|
25
|
+
from .outcome_learning import (
|
|
26
|
+
record_outcome,
|
|
27
|
+
update_pattern_effectiveness,
|
|
28
|
+
calculate_effectiveness_score,
|
|
29
|
+
)
|
|
30
|
+
|
|
31
|
+
__all__ = [
|
|
32
|
+
# Session briefing
|
|
33
|
+
"SessionBriefing",
|
|
34
|
+
"generate_session_briefing",
|
|
35
|
+
"get_session_briefing_resource",
|
|
36
|
+
# Predictive suggestions
|
|
37
|
+
"Suggestion",
|
|
38
|
+
"Warning",
|
|
39
|
+
"predict_needs",
|
|
40
|
+
"warn_potential_issues",
|
|
41
|
+
"suggest_related_context",
|
|
42
|
+
# Outcome learning
|
|
43
|
+
"record_outcome",
|
|
44
|
+
"update_pattern_effectiveness",
|
|
45
|
+
"calculate_effectiveness_score",
|
|
46
|
+
]
|
|
@@ -0,0 +1,444 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Outcome Learning and Effectiveness Tracking for Claude Code Memory Server.
|
|
3
|
+
|
|
4
|
+
Tracks solution effectiveness and learns from outcomes:
|
|
5
|
+
- Record solution outcomes (success/failure)
|
|
6
|
+
- Update effectiveness scores
|
|
7
|
+
- Propagate learning to patterns
|
|
8
|
+
- Decay old outcomes
|
|
9
|
+
|
|
10
|
+
Phase 7 Implementation - Learning From Outcomes
|
|
11
|
+
"""
|
|
12
|
+
|
|
13
|
+
from datetime import datetime, timedelta
|
|
14
|
+
from typing import Optional, Dict, Any
|
|
15
|
+
import logging
|
|
16
|
+
import math
|
|
17
|
+
|
|
18
|
+
from pydantic import BaseModel, Field
|
|
19
|
+
|
|
20
|
+
from ..backends.base import GraphBackend
|
|
21
|
+
from ..models import Memory, MemoryType
|
|
22
|
+
|
|
23
|
+
logger = logging.getLogger(__name__)
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
class Outcome(BaseModel):
    """
    Outcome of applying a memory (solution, pattern, etc.).

    Tracks whether a solution worked and in what context.
    """

    # Unique ID for this outcome record (generated from a timestamp in record_outcome).
    outcome_id: str
    # ID of the Memory node this outcome was observed for.
    memory_id: str
    # Whether applying the memory worked.
    success: bool
    # Free-text description of what happened.
    description: str
    # Extra context about the outcome; stored stringified (str(context)) in the graph.
    context: Optional[Dict[str, Any]] = None
    # When the outcome was recorded (naive local time, not UTC).
    timestamp: datetime = Field(default_factory=datetime.now)
    impact: float = Field(default=1.0, ge=0.0, le=1.0)  # How significant this outcome was
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
class EffectivenessScore(BaseModel):
    """
    Effectiveness score for a memory.

    Combines multiple outcomes to calculate overall effectiveness.
    """

    # ID of the Memory node this score describes.
    memory_id: str
    # Total number of recorded outcomes for the memory.
    total_uses: int = 0
    # Outcomes recorded with success == True.
    successful_uses: int = 0
    # Outcomes recorded with success == False.
    failed_uses: int = 0
    effectiveness: float = 0.5  # 0.0 to 1.0
    confidence: float = 0.5  # How confident we are in this score
    # When this score snapshot was computed (naive local time).
    last_updated: datetime = Field(default_factory=datetime.now)
|
|
56
|
+
|
|
57
|
+
|
|
58
|
+
async def record_outcome(
    backend: GraphBackend,
    memory_id: str,
    outcome_description: str,
    success: bool,
    context: Optional[Dict[str, Any]] = None,
    impact: float = 1.0,
) -> bool:
    """
    Record the outcome of using a memory.

    Creates an Outcome node, links it to the memory via RESULTED_IN, then
    refreshes the memory's effectiveness score and propagates the signal to
    related patterns.

    Args:
        backend: Database backend
        memory_id: ID of memory that was used
        outcome_description: Description of what happened
        success: Whether the outcome was successful
        context: Additional context about the outcome
        impact: How significant this outcome was (0.0 to 1.0)

    Returns:
        True if outcome was recorded successfully

    Example:
        >>> success = await record_outcome(
        ...     backend,
        ...     "solution_123",
        ...     "Fixed the authentication bug",
        ...     success=True,
        ...     impact=0.9
        ... )
    """
    logger.info(f"Recording outcome for memory {memory_id}: success={success}")

    # Timestamp-based ID for the new outcome node.
    outcome_id = f"outcome_{datetime.now().timestamp()}"

    create_outcome_query = """
    MATCH (m:Memory {id: $memory_id})
    CREATE (o:Outcome {
        id: $outcome_id,
        memory_id: $memory_id,
        success: $success,
        description: $description,
        context: $context,
        timestamp: datetime($timestamp),
        impact: $impact
    })
    CREATE (m)-[:RESULTED_IN]->(o)
    RETURN o.id as id
    """

    # context is persisted as its str() rendering (graph properties are scalar).
    params = {
        "memory_id": memory_id,
        "outcome_id": outcome_id,
        "success": success,
        "description": outcome_description,
        "context": str(context) if context else None,
        "timestamp": datetime.now().isoformat(),
        "impact": impact,
    }

    try:
        created = await backend.execute_query(create_outcome_query, params)
        if not created:
            # MATCH found no memory with that id, so nothing was created.
            logger.error(f"Failed to create outcome for memory {memory_id}")
            return False

        logger.debug(f"Created outcome {outcome_id}")

        # Refresh the memory's own effectiveness score ...
        await _update_memory_effectiveness(backend, memory_id, success, impact)
        # ... then share the learning with any patterns it builds on.
        await _propagate_to_patterns(backend, memory_id, success, impact)
        return True

    except Exception as e:
        # Best-effort: recording an outcome must never crash the caller.
        logger.error(f"Error recording outcome: {e}")
        return False
|
|
142
|
+
|
|
143
|
+
|
|
144
|
+
async def _update_memory_effectiveness(
    backend: GraphBackend,
    memory_id: str,
    success: bool,
    impact: float,
) -> None:
    """
    Update effectiveness score for a memory based on a new outcome.

    Blends the historical success rate with the latest outcome, weighting the
    new evidence by its impact.

    Args:
        backend: Database backend
        memory_id: ID of the memory whose score is updated
        success: Whether the new outcome was successful
        impact: Weight of the new outcome (0.0 to 1.0)
    """
    logger.debug(f"Updating effectiveness for memory {memory_id}")

    # Get current statistics. count()/sum() yield 0 when there are no
    # outcomes, but usage_count is None if the property was never set.
    stats_query = """
    MATCH (m:Memory {id: $memory_id})
    OPTIONAL MATCH (m)-[:RESULTED_IN]->(o:Outcome)
    RETURN m.usage_count as usage_count,
           count(o) as total_outcomes,
           sum(CASE WHEN o.success THEN 1 ELSE 0 END) as successful_outcomes
    """

    try:
        result = await backend.execute_query(stats_query, {"memory_id": memory_id})

        if not result:
            return

        record = result[0]
        # Missing node properties come back as explicit None values, so
        # dict.get's default never applies — coalesce with `or 0` (safe for
        # counts, where 0 is the default anyway). Previously a None here made
        # the arithmetic below raise and the update was silently skipped.
        usage_count = record.get("usage_count") or 0
        total_outcomes = record.get("total_outcomes") or 0
        successful_outcomes = record.get("successful_outcomes") or 0

        # Weighted blend: the new outcome contributes `impact`, the
        # historical success rate contributes (1 - impact).
        if total_outcomes > 0:
            success_rate = successful_outcomes / total_outcomes
            new_effectiveness = (success_rate * (1 - impact)) + (1.0 if success else 0.0) * impact
        else:
            # First outcome: score is entirely this observation.
            new_effectiveness = 1.0 if success else 0.0

        # Clamp to [0, 1]
        new_effectiveness = max(0.0, min(1.0, new_effectiveness))

        # Confidence grows with evidence: 0.3 base, +0.03 per outcome,
        # capped at 0.9.
        confidence = min(0.9, 0.3 + (total_outcomes / 20.0) * 0.6)

        # Update memory. usage_count is coalesced above so $usage_count + 1
        # can no longer produce null.
        update_query = """
        MATCH (m:Memory {id: $memory_id})
        SET m.effectiveness = $effectiveness,
            m.confidence = $confidence,
            m.usage_count = $usage_count + 1,
            m.last_accessed = datetime($timestamp)
        RETURN m.effectiveness as effectiveness
        """

        await backend.execute_query(
            update_query,
            {
                "memory_id": memory_id,
                "effectiveness": new_effectiveness,
                "confidence": confidence,
                "usage_count": usage_count,
                "timestamp": datetime.now().isoformat(),
            },
        )

        logger.info(f"Updated effectiveness for {memory_id}: {new_effectiveness:.2f} (confidence: {confidence:.2f})")

    except Exception as e:
        logger.error(f"Error updating memory effectiveness: {e}")
|
|
221
|
+
|
|
222
|
+
|
|
223
|
+
async def _propagate_to_patterns(
    backend: GraphBackend,
    memory_id: str,
    success: bool,
    impact: float,
) -> None:
    """
    Push outcome learning from a memory to the patterns it builds on.

    Every code_pattern the memory is DERIVED_FROM / USES / APPLIES receives
    the outcome signal at half the original impact, so patterns adjust more
    slowly than the solutions derived from them.
    """
    logger.debug(f"Propagating outcome to related patterns for memory {memory_id}")

    pattern_query = """
    MATCH (m:Memory {id: $memory_id})
    MATCH (m)-[:DERIVED_FROM|USES|APPLIES]->(p:Memory {type: 'code_pattern'})
    RETURN p.id as pattern_id, p.effectiveness as effectiveness
    """

    try:
        rows = await backend.execute_query(pattern_query, {"memory_id": memory_id})
        for row in rows:
            # Patterns receive the signal at half strength.
            await update_pattern_effectiveness(backend, row["pattern_id"], success, impact * 0.5)

    except Exception as e:
        logger.error(f"Error propagating to patterns: {e}")
|
|
252
|
+
|
|
253
|
+
|
|
254
|
+
async def update_pattern_effectiveness(
    backend: GraphBackend,
    pattern_id: str,
    success: bool,
    impact: float = 1.0,
) -> bool:
    """
    Update effectiveness of a pattern based on usage outcome.

    Patterns are updated with heavy dampening so they drift more slowly than
    individual solutions.

    Args:
        backend: Database backend
        pattern_id: ID of pattern to update
        success: Whether usage was successful
        impact: Impact weight (0.0 to 1.0)

    Returns:
        True if updated successfully

    Example:
        >>> await update_pattern_effectiveness(backend, "pattern_123", success=True, impact=0.8)
    """
    logger.info(f"Updating pattern {pattern_id} effectiveness: success={success}, impact={impact}")

    # Get pattern statistics (current score plus outcomes of memories that
    # use this pattern).
    stats_query = """
    MATCH (p:Memory {id: $pattern_id, type: 'code_pattern'})
    OPTIONAL MATCH (p)-[:DERIVED_FROM|USES|APPLIES]-(m:Memory)-[:RESULTED_IN]->(o:Outcome)
    RETURN p.effectiveness as current_effectiveness,
           p.confidence as current_confidence,
           count(o) as total_outcomes,
           sum(CASE WHEN o.success THEN 1 ELSE 0 END) as successful_outcomes
    """

    try:
        result = await backend.execute_query(stats_query, {"pattern_id": pattern_id})

        if not result:
            logger.warning(f"Pattern {pattern_id} not found")
            return False

        record = result[0]
        # Properties never set on the node come back as explicit None values,
        # so dict.get's default never applies. Coalesce explicitly; 0.0 is a
        # legitimate score, so don't use `or` for the floats. Previously a
        # None here made the arithmetic below raise and the update failed.
        current_effectiveness = record.get("current_effectiveness")
        current_effectiveness = 0.5 if current_effectiveness is None else current_effectiveness
        current_confidence = record.get("current_confidence")
        current_confidence = 0.5 if current_confidence is None else current_confidence
        total_outcomes = record.get("total_outcomes") or 0
        successful_outcomes = record.get("successful_outcomes") or 0

        # Calculate new effectiveness with dampening: patterns should change
        # more slowly than individual solutions.
        dampening = 0.3  # Patterns change 30% as fast as solutions

        if total_outcomes > 0:
            success_rate = successful_outcomes / total_outcomes
            adjustment = ((1.0 if success else 0.0) - success_rate) * impact * dampening
        else:
            adjustment = ((1.0 if success else 0.0) - current_effectiveness) * impact * dampening

        new_effectiveness = max(0.0, min(1.0, current_effectiveness + adjustment))

        # Each observation nudges confidence up slightly, capped at 0.95.
        new_confidence = min(0.95, current_confidence + 0.02)

        # coalesce() keeps the increment from producing null when
        # usage_count was never initialised (null + 1 = null in Cypher).
        update_query = """
        MATCH (p:Memory {id: $pattern_id})
        SET p.effectiveness = $effectiveness,
            p.confidence = $confidence,
            p.usage_count = coalesce(p.usage_count, 0) + 1,
            p.last_accessed = datetime($timestamp)
        RETURN p.effectiveness as effectiveness
        """

        await backend.execute_query(
            update_query,
            {
                "pattern_id": pattern_id,
                "effectiveness": new_effectiveness,
                "confidence": new_confidence,
                "timestamp": datetime.now().isoformat(),
            },
        )

        logger.info(f"Updated pattern {pattern_id} effectiveness: {new_effectiveness:.2f}")
        return True

    except Exception as e:
        logger.error(f"Error updating pattern effectiveness: {e}")
        return False
|
|
344
|
+
|
|
345
|
+
|
|
346
|
+
async def calculate_effectiveness_score(
    backend: GraphBackend,
    memory_id: str,
) -> Optional[EffectivenessScore]:
    """
    Calculate detailed effectiveness score for a memory.

    Args:
        backend: Database backend
        memory_id: ID of memory to analyze

    Returns:
        EffectivenessScore with statistics, or None if not found

    Example:
        >>> score = await calculate_effectiveness_score(backend, "solution_123")
        >>> print(f"Success rate: {score.successful_uses / score.total_uses}")
    """
    logger.debug(f"Calculating effectiveness score for {memory_id}")

    query = """
    MATCH (m:Memory {id: $memory_id})
    OPTIONAL MATCH (m)-[:RESULTED_IN]->(o:Outcome)
    RETURN m.effectiveness as effectiveness,
           m.confidence as confidence,
           count(o) as total_outcomes,
           sum(CASE WHEN o.success THEN 1 ELSE 0 END) as successful_outcomes,
           sum(CASE WHEN NOT o.success THEN 1 ELSE 0 END) as failed_outcomes
    """

    try:
        result = await backend.execute_query(query, {"memory_id": memory_id})

        if not result:
            return None

        record = result[0]

        # Memories that never had effectiveness/confidence set return None
        # for those columns (dict.get's default only applies to absent keys).
        # Passing None into the non-optional float fields made pydantic raise
        # and this function wrongly return None for an existing memory — so
        # substitute the model defaults explicitly.
        effectiveness = record.get("effectiveness")
        confidence = record.get("confidence")

        return EffectivenessScore(
            memory_id=memory_id,
            total_uses=record.get("total_outcomes") or 0,
            successful_uses=record.get("successful_outcomes") or 0,
            failed_uses=record.get("failed_outcomes") or 0,
            effectiveness=0.5 if effectiveness is None else effectiveness,
            confidence=0.5 if confidence is None else confidence,
            last_updated=datetime.now(),
        )

    except Exception as e:
        logger.error(f"Error calculating effectiveness score: {e}")
        return None
|
|
397
|
+
|
|
398
|
+
|
|
399
|
+
def design_decay_mechanism() -> Dict[str, Any]:
    """
    Return the design spec for the effectiveness decay mechanism (not yet implemented).

    Old outcomes should carry less weight over time. The intended background
    job would run weekly and recompute each memory's effectiveness as a
    time-weighted average of its outcomes, using exponential decay
    (weight = e^(-age_in_days / half_life)) with a recommended half-life of
    180 days, so outcomes older than about a year contribute almost nothing.

    Returns:
        Design specification for the decay mechanism.
    """
    spec: Dict[str, Any] = dict(
        mechanism="exponential_decay",
        half_life_days=180,
        decay_function="weight = exp(-age_in_days / half_life)",
        run_frequency="weekly",
        implementation="background_job",
        status="designed_not_implemented",
        priority="medium",
        estimated_effort="4 hours",
    )
    return spec
|