memorygraphMCP 0.11.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memorygraph/__init__.py +50 -0
- memorygraph/__main__.py +12 -0
- memorygraph/advanced_tools.py +509 -0
- memorygraph/analytics/__init__.py +46 -0
- memorygraph/analytics/advanced_queries.py +727 -0
- memorygraph/backends/__init__.py +21 -0
- memorygraph/backends/base.py +179 -0
- memorygraph/backends/cloud.py +75 -0
- memorygraph/backends/cloud_backend.py +858 -0
- memorygraph/backends/factory.py +577 -0
- memorygraph/backends/falkordb_backend.py +749 -0
- memorygraph/backends/falkordblite_backend.py +746 -0
- memorygraph/backends/ladybugdb_backend.py +242 -0
- memorygraph/backends/memgraph_backend.py +327 -0
- memorygraph/backends/neo4j_backend.py +298 -0
- memorygraph/backends/sqlite_fallback.py +463 -0
- memorygraph/backends/turso.py +448 -0
- memorygraph/cli.py +743 -0
- memorygraph/cloud_database.py +297 -0
- memorygraph/config.py +295 -0
- memorygraph/database.py +933 -0
- memorygraph/graph_analytics.py +631 -0
- memorygraph/integration/__init__.py +69 -0
- memorygraph/integration/context_capture.py +426 -0
- memorygraph/integration/project_analysis.py +583 -0
- memorygraph/integration/workflow_tracking.py +492 -0
- memorygraph/intelligence/__init__.py +59 -0
- memorygraph/intelligence/context_retrieval.py +447 -0
- memorygraph/intelligence/entity_extraction.py +386 -0
- memorygraph/intelligence/pattern_recognition.py +420 -0
- memorygraph/intelligence/temporal.py +374 -0
- memorygraph/migration/__init__.py +27 -0
- memorygraph/migration/manager.py +579 -0
- memorygraph/migration/models.py +142 -0
- memorygraph/migration/scripts/__init__.py +17 -0
- memorygraph/migration/scripts/bitemporal_migration.py +595 -0
- memorygraph/migration/scripts/multitenancy_migration.py +452 -0
- memorygraph/migration_tools_module.py +146 -0
- memorygraph/models.py +684 -0
- memorygraph/proactive/__init__.py +46 -0
- memorygraph/proactive/outcome_learning.py +444 -0
- memorygraph/proactive/predictive.py +410 -0
- memorygraph/proactive/session_briefing.py +399 -0
- memorygraph/relationships.py +668 -0
- memorygraph/server.py +883 -0
- memorygraph/sqlite_database.py +1876 -0
- memorygraph/tools/__init__.py +59 -0
- memorygraph/tools/activity_tools.py +262 -0
- memorygraph/tools/memory_tools.py +315 -0
- memorygraph/tools/migration_tools.py +181 -0
- memorygraph/tools/relationship_tools.py +147 -0
- memorygraph/tools/search_tools.py +406 -0
- memorygraph/tools/temporal_tools.py +339 -0
- memorygraph/utils/__init__.py +10 -0
- memorygraph/utils/context_extractor.py +429 -0
- memorygraph/utils/error_handling.py +151 -0
- memorygraph/utils/export_import.py +425 -0
- memorygraph/utils/graph_algorithms.py +200 -0
- memorygraph/utils/pagination.py +149 -0
- memorygraph/utils/project_detection.py +133 -0
- memorygraphmcp-0.11.7.dist-info/METADATA +970 -0
- memorygraphmcp-0.11.7.dist-info/RECORD +65 -0
- memorygraphmcp-0.11.7.dist-info/WHEEL +4 -0
- memorygraphmcp-0.11.7.dist-info/entry_points.txt +2 -0
- memorygraphmcp-0.11.7.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,426 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Development Context Capture for Claude Code Integration.
|
|
3
|
+
|
|
4
|
+
Automatically captures development context from Claude Code sessions including:
|
|
5
|
+
- Task context (description, goals, files involved)
|
|
6
|
+
- Command executions (commands run, results, errors)
|
|
7
|
+
- Error pattern analysis (recurring errors, solutions)
|
|
8
|
+
- Solution effectiveness tracking
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
import re
|
|
12
|
+
from datetime import datetime
|
|
13
|
+
from typing import Any, Optional
|
|
14
|
+
from uuid import uuid4
|
|
15
|
+
|
|
16
|
+
from pydantic import BaseModel, Field
|
|
17
|
+
|
|
18
|
+
from ..backends.base import GraphBackend
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
class TaskContext(BaseModel):
    """Context for a single development task captured from a Claude Code session.

    Instances are persisted by ``capture_task_context`` as ``Memory`` nodes of
    type ``task`` in the graph backend.
    """

    # Stable unique identifier, auto-generated when not supplied.
    task_id: str = Field(default_factory=lambda: str(uuid4()))
    description: str = Field(..., description="Task description")
    goals: list[str] = Field(default_factory=list, description="Task goals")
    files_involved: list[str] = Field(default_factory=list, description="Files involved in task")
    # NOTE(review): naive local time via datetime.now — confirm whether the
    # backends expect timezone-aware/UTC timestamps before changing.
    start_time: datetime = Field(default_factory=datetime.now)
    # None while the task is still in progress.
    end_time: Optional[datetime] = None
    # None until an outcome has been recorded.
    success: Optional[bool] = None
    notes: Optional[str] = None
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
class CommandExecution(BaseModel):
    """A single command execution captured from a session.

    Instances are persisted by ``capture_command_execution`` as ``Memory``
    nodes of type ``observation``.
    """

    # Stable unique identifier, auto-generated when not supplied; reused as
    # the id of the stored Memory node.
    command_id: str = Field(default_factory=lambda: str(uuid4()))
    command: str = Field(..., description="Command executed")
    output: str = Field(default="", description="Command output")
    error: Optional[str] = Field(None, description="Error message if failed")
    success: bool = Field(..., description="Whether command succeeded")
    # NOTE(review): naive local time via datetime.now — confirm timezone
    # expectations against the backends.
    timestamp: datetime = Field(default_factory=datetime.now)
    task_id: Optional[str] = Field(None, description="Associated task ID")
|
|
44
|
+
|
|
45
|
+
|
|
46
|
+
class ErrorPattern(BaseModel):
    """A recurring error pattern identified from failed commands.

    Instances are persisted by ``analyze_error_patterns`` as ``Memory`` nodes
    of type ``error_pattern``; ``frequency`` and the solution lists are then
    updated in-place in the graph as new occurrences and fixes are recorded.
    """

    # Stable unique identifier, auto-generated when not supplied.
    pattern_id: str = Field(default_factory=lambda: str(uuid4()))
    error_type: str = Field(..., description="Type of error")
    error_message: str = Field(..., description="Error message pattern")
    # Starts at 1 for a newly observed pattern; incremented on re-occurrence.
    frequency: int = Field(default=1, description="Number of occurrences")
    solutions_tried: list[str] = Field(default_factory=list, description="Solutions attempted")
    successful_solutions: list[str] = Field(
        default_factory=list, description="Solutions that worked"
    )
    context: dict[str, Any] = Field(default_factory=dict, description="Additional context")
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
# Security filters for sensitive data
|
|
61
|
+
SENSITIVE_PATTERNS = [
|
|
62
|
+
r"(?i)(api[_-]?key|token|password|secret|auth)[=:\s]+['\"]?[\w\-\.]+['\"]?",
|
|
63
|
+
r"(?i)bearer\s+[\w\-\.]+", # Bearer tokens
|
|
64
|
+
r"(?i)(aws|gcp|azure)[_-]?(access|secret)[_-]?key[=:\s]+['\"]?[\w\-\.]+['\"]?",
|
|
65
|
+
r"-----BEGIN (RSA |EC )?PRIVATE KEY-----",
|
|
66
|
+
r"(?:https?://)?[\w\-]+:[\w\-]+@", # URLs with credentials
|
|
67
|
+
r"\b[\w\-\.]+@[\w\-\.]+\.(com|net|org|io|dev)\b", # Email addresses (PII)
|
|
68
|
+
]
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
def _sanitize_content(content: str) -> str:
|
|
72
|
+
"""
|
|
73
|
+
Sanitize content by removing sensitive information.
|
|
74
|
+
|
|
75
|
+
Args:
|
|
76
|
+
content: Content to sanitize
|
|
77
|
+
|
|
78
|
+
Returns:
|
|
79
|
+
Sanitized content
|
|
80
|
+
"""
|
|
81
|
+
sanitized = content
|
|
82
|
+
for pattern in SENSITIVE_PATTERNS:
|
|
83
|
+
sanitized = re.sub(pattern, "[REDACTED]", sanitized)
|
|
84
|
+
return sanitized
|
|
85
|
+
|
|
86
|
+
|
|
87
|
+
async def capture_task_context(
    backend: GraphBackend,
    description: str,
    goals: list[str],
    files: Optional[list[str]] = None,
    project_id: Optional[str] = None,
) -> str:
    """
    Capture task context and store it as a Memory node of type ``task``.

    Args:
        backend: Database backend
        description: Task description
        goals: List of task goals
        files: List of files involved (optional)
        project_id: Project ID (optional)

    Returns:
        Memory ID of the stored task context

    Example:
        >>> memory_id = await capture_task_context(
        ...     backend,
        ...     "Add dark mode toggle to settings",
        ...     ["Create toggle component", "Add state management", "Update styling"],
        ...     files=["src/components/Settings.tsx", "src/context/ThemeContext.tsx"],
        ...     project_id="my-app"
        ... )
    """
    # Redact anything sensitive before it can reach the database.
    description = _sanitize_content(description)
    goals = [_sanitize_content(g) for g in goals]
    clean_files = [_sanitize_content(f) for f in (files or [])]

    task = TaskContext(
        description=description, goals=goals, files_involved=clean_files
    )

    # Assemble the Memory node payload.
    goal_lines = "\n".join(f"- {g}" for g in goals)
    context: dict[str, Any] = {
        "goals": goals,
        "files": clean_files,
        "start_time": task.start_time.isoformat(),
    }
    if project_id:
        context["project_id"] = project_id

    node_props = {
        "id": task.task_id,
        "type": "task",
        "title": f"Task: {description[:100]}",
        "content": f"Description: {description}\n\nGoals:\n{goal_lines}",
        "context": context,
        "created_at": datetime.now(),
        "updated_at": datetime.now(),
    }

    memory_id = await backend.store_node("Memory", node_props)

    # Ensure a file Entity exists for each involved file and link the task.
    for path in clean_files:
        rows = await backend.execute_query(
            """
            MERGE (f:Entity {name: $file_path, type: 'file'})
            ON CREATE SET f.id = $file_id, f.created_at = datetime()
            RETURN f.id as id
            """,
            {"file_path": path, "file_id": str(uuid4())},
        )

        if rows:
            await backend.store_relationship(
                memory_id,
                rows[0]["id"],
                "INVOLVES",
                {"created_at": datetime.now(), "strength": 1.0},
            )

    # Attach the task to its project when one was supplied.
    if project_id:
        await backend.store_relationship(
            memory_id,
            project_id,
            "PART_OF",
            {"created_at": datetime.now(), "strength": 1.0},
        )

    return memory_id
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
async def capture_command_execution(
    backend: GraphBackend,
    command: str,
    output: str = "",
    error: Optional[str] = None,
    success: bool = True,
    task_id: Optional[str] = None,
) -> str:
    """
    Capture a command execution and store it as an observation Memory node.

    Args:
        backend: Database backend
        command: Command executed
        output: Command output
        error: Error message if failed
        success: Whether command succeeded
        task_id: Associated task ID (optional)

    Returns:
        Memory ID of the stored command execution

    Example:
        >>> memory_id = await capture_command_execution(
        ...     backend,
        ...     "npm run build",
        ...     output="Build completed successfully",
        ...     success=True,
        ...     task_id="task-123"
        ... )
    """
    # Redact anything sensitive before it can reach the database.
    command = _sanitize_content(command)
    output = _sanitize_content(output)
    if error:
        error = _sanitize_content(error)

    execution = CommandExecution(
        command=command, output=output, error=error, success=success, task_id=task_id
    )

    # Observation body: command, outcome, and truncated output/error text.
    body = (
        f"Command: {command}\n\n"
        f"Success: {success}\n\n"
        f"Output:\n{output[:500]}"
    )
    if error:
        body += f"\n\nError:\n{error[:500]}"

    context: dict[str, Any] = {
        "command": command,
        "success": success,
        "has_error": bool(error),
        "timestamp": execution.timestamp.isoformat(),
    }
    if task_id:
        context["task_id"] = task_id

    node_props = {
        "id": execution.command_id,
        "type": "observation",
        "title": f"Command: {command[:100]}",
        "content": body,
        "context": context,
        "created_at": datetime.now(),
        "updated_at": datetime.now(),
    }

    memory_id = await backend.store_node("Memory", node_props)

    # Link the observation back to its task when one was supplied.
    if task_id:
        await backend.store_relationship(
            memory_id,
            task_id,
            "EXECUTED_IN",
            {"created_at": datetime.now(), "strength": 1.0},
        )

    # Failed commands get linked to the error patterns they exhibit.
    if error and not success:
        for pattern_id in await analyze_error_patterns(backend, error):
            await backend.store_relationship(
                memory_id,
                pattern_id,
                "EXHIBITS",
                {"created_at": datetime.now(), "strength": 0.9},
            )

    return memory_id
|
|
267
|
+
|
|
268
|
+
|
|
269
|
+
async def analyze_error_patterns(backend: GraphBackend, error: str) -> list[str]:
    """
    Analyze an error message and identify or update matching error patterns.

    The error type is extracted from the first line matching
    ``<Name>Error:`` / ``<Name>Exception:``.  If a pattern Memory with that
    type already exists, its occurrence count is incremented; otherwise a new
    ``error_pattern`` Memory node is created.

    Args:
        backend: Database backend
        error: Error message (may be multi-line, e.g. a full traceback)

    Returns:
        List of pattern memory IDs

    Example:
        >>> pattern_ids = await analyze_error_patterns(
        ...     backend,
        ...     "TypeError: Cannot read property 'map' of undefined"
        ... )
    """
    # Sanitize error before any of it is stored.
    error = _sanitize_content(error)

    # Extract the error type.  re.MULTILINE lets "^" match at the start of
    # every line, so the final "SomeError: ..." line of a multi-line
    # traceback is recognized too (a start-of-string-only anchor would
    # classify such errors as "unknown").
    error_type = "unknown"
    type_match = re.search(r"^(\w+Error|\w+Exception):", error, re.MULTILINE)
    if type_match:
        error_type = type_match.group(1)

    # Search for an existing pattern of this error type.
    existing_patterns = await backend.search_nodes(
        "Memory",
        {
            "type": "error_pattern",
            "context.error_type": error_type,
        },
    )

    pattern_ids = []

    if existing_patterns:
        # Update existing pattern(s): bump the occurrence count.
        for pattern in existing_patterns:
            pattern_id = pattern["id"]
            # NOTE(review): nested map property updates (m.context.frequency)
            # are not supported by every Cypher backend — confirm this works
            # against all configured GraphBackend implementations.
            await backend.execute_query(
                """
                MATCH (m:Memory {id: $pattern_id})
                SET m.context.frequency = COALESCE(m.context.frequency, 0) + 1,
                    m.updated_at = datetime()
                """,
                {"pattern_id": pattern_id},
            )
            pattern_ids.append(pattern_id)
    else:
        # Create a new pattern memory for this error type.
        error_pattern = ErrorPattern(
            error_type=error_type,
            error_message=error[:500],  # Truncate long errors
        )

        properties = {
            "id": error_pattern.pattern_id,
            "type": "error_pattern",
            "title": f"Error Pattern: {error_type}",
            "content": f"Error Type: {error_type}\n\nMessage Pattern:\n{error_pattern.error_message}",
            "context": {
                "error_type": error_type,
                "error_message": error_pattern.error_message,
                "frequency": 1,
                "solutions_tried": [],
                "successful_solutions": [],
            },
            "created_at": datetime.now(),
            "updated_at": datetime.now(),
        }

        pattern_id = await backend.store_node("Memory", properties)
        pattern_ids.append(pattern_id)

    return pattern_ids
|
|
347
|
+
|
|
348
|
+
|
|
349
|
+
async def track_solution_effectiveness(
    backend: GraphBackend,
    solution_memory_id: str,
    error_pattern_id: str,
    success: bool,
    notes: Optional[str] = None,
) -> None:
    """
    Track effectiveness of a solution for an error pattern.

    Creates a SOLVES (worked) or ATTEMPTED_SOLUTION (did not work)
    relationship, records the solution on the pattern's bookkeeping lists,
    and recomputes the solution's overall effectiveness ratio.

    Args:
        backend: Database backend
        solution_memory_id: Memory ID of the solution
        error_pattern_id: Memory ID of the error pattern
        success: Whether the solution worked
        notes: Additional notes (optional)

    Example:
        >>> await track_solution_effectiveness(
        ...     backend,
        ...     solution_id="sol-123",
        ...     error_pattern_id="err-456",
        ...     success=True,
        ...     notes="Fixed by adding null check"
        ... )
    """
    # Relationship kind and weights depend on whether the solution worked.
    rel_type = "SOLVES" if success else "ATTEMPTED_SOLUTION"
    rel_props: dict[str, Any] = {
        "created_at": datetime.now(),
        "success": success,
        "strength": 1.0 if success else 0.3,
        "confidence": 0.9 if success else 0.5,
    }
    if notes:
        rel_props["notes"] = _sanitize_content(notes)

    await backend.store_relationship(
        solution_memory_id, error_pattern_id, rel_type, rel_props
    )

    # Append the solution id to the appropriate list on the error pattern.
    if success:
        update_query = """
            MATCH (m:Memory {id: $pattern_id})
            SET m.context.successful_solutions =
                COALESCE(m.context.successful_solutions, []) + [$solution_id],
                m.updated_at = datetime()
            """
    else:
        update_query = """
            MATCH (m:Memory {id: $pattern_id})
            SET m.context.solutions_tried =
                COALESCE(m.context.solutions_tried, []) + [$solution_id],
                m.updated_at = datetime()
            """
    await backend.execute_query(
        update_query,
        {"pattern_id": error_pattern_id, "solution_id": solution_memory_id},
    )

    # Recompute the solution's success ratio across all linked patterns.
    await backend.execute_query(
        """
        MATCH (s:Memory {id: $solution_id})
        MATCH (s)-[r:SOLVES|ATTEMPTED_SOLUTION]->(e:Memory {type: 'error_pattern'})
        WITH s, COUNT(r) as total_attempts,
             SUM(CASE WHEN type(r) = 'SOLVES' THEN 1 ELSE 0 END) as successes
        SET s.context.effectiveness = toFloat(successes) / toFloat(total_attempts),
            s.context.total_uses = total_attempts,
            s.updated_at = datetime()
        """,
        {"solution_id": solution_memory_id},
    )
|