memorygraphMCP 0.11.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memorygraph/__init__.py +50 -0
- memorygraph/__main__.py +12 -0
- memorygraph/advanced_tools.py +509 -0
- memorygraph/analytics/__init__.py +46 -0
- memorygraph/analytics/advanced_queries.py +727 -0
- memorygraph/backends/__init__.py +21 -0
- memorygraph/backends/base.py +179 -0
- memorygraph/backends/cloud.py +75 -0
- memorygraph/backends/cloud_backend.py +858 -0
- memorygraph/backends/factory.py +577 -0
- memorygraph/backends/falkordb_backend.py +749 -0
- memorygraph/backends/falkordblite_backend.py +746 -0
- memorygraph/backends/ladybugdb_backend.py +242 -0
- memorygraph/backends/memgraph_backend.py +327 -0
- memorygraph/backends/neo4j_backend.py +298 -0
- memorygraph/backends/sqlite_fallback.py +463 -0
- memorygraph/backends/turso.py +448 -0
- memorygraph/cli.py +743 -0
- memorygraph/cloud_database.py +297 -0
- memorygraph/config.py +295 -0
- memorygraph/database.py +933 -0
- memorygraph/graph_analytics.py +631 -0
- memorygraph/integration/__init__.py +69 -0
- memorygraph/integration/context_capture.py +426 -0
- memorygraph/integration/project_analysis.py +583 -0
- memorygraph/integration/workflow_tracking.py +492 -0
- memorygraph/intelligence/__init__.py +59 -0
- memorygraph/intelligence/context_retrieval.py +447 -0
- memorygraph/intelligence/entity_extraction.py +386 -0
- memorygraph/intelligence/pattern_recognition.py +420 -0
- memorygraph/intelligence/temporal.py +374 -0
- memorygraph/migration/__init__.py +27 -0
- memorygraph/migration/manager.py +579 -0
- memorygraph/migration/models.py +142 -0
- memorygraph/migration/scripts/__init__.py +17 -0
- memorygraph/migration/scripts/bitemporal_migration.py +595 -0
- memorygraph/migration/scripts/multitenancy_migration.py +452 -0
- memorygraph/migration_tools_module.py +146 -0
- memorygraph/models.py +684 -0
- memorygraph/proactive/__init__.py +46 -0
- memorygraph/proactive/outcome_learning.py +444 -0
- memorygraph/proactive/predictive.py +410 -0
- memorygraph/proactive/session_briefing.py +399 -0
- memorygraph/relationships.py +668 -0
- memorygraph/server.py +883 -0
- memorygraph/sqlite_database.py +1876 -0
- memorygraph/tools/__init__.py +59 -0
- memorygraph/tools/activity_tools.py +262 -0
- memorygraph/tools/memory_tools.py +315 -0
- memorygraph/tools/migration_tools.py +181 -0
- memorygraph/tools/relationship_tools.py +147 -0
- memorygraph/tools/search_tools.py +406 -0
- memorygraph/tools/temporal_tools.py +339 -0
- memorygraph/utils/__init__.py +10 -0
- memorygraph/utils/context_extractor.py +429 -0
- memorygraph/utils/error_handling.py +151 -0
- memorygraph/utils/export_import.py +425 -0
- memorygraph/utils/graph_algorithms.py +200 -0
- memorygraph/utils/pagination.py +149 -0
- memorygraph/utils/project_detection.py +133 -0
- memorygraphmcp-0.11.7.dist-info/METADATA +970 -0
- memorygraphmcp-0.11.7.dist-info/RECORD +65 -0
- memorygraphmcp-0.11.7.dist-info/WHEEL +4 -0
- memorygraphmcp-0.11.7.dist-info/entry_points.txt +2 -0
- memorygraphmcp-0.11.7.dist-info/licenses/LICENSE +21 -0
|
@@ -0,0 +1,492 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Workflow Memory Tools for Claude Code Integration.
|
|
3
|
+
|
|
4
|
+
Tracks development workflows and provides intelligent suggestions:
|
|
5
|
+
- Workflow action tracking
|
|
6
|
+
- Session state management
|
|
7
|
+
- Workflow suggestions based on past successes
|
|
8
|
+
- Workflow optimization recommendations
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from datetime import datetime, timedelta
|
|
12
|
+
from typing import Any, Optional
|
|
13
|
+
from uuid import uuid4
|
|
14
|
+
|
|
15
|
+
from pydantic import BaseModel, Field
|
|
16
|
+
|
|
17
|
+
from ..backends.base import GraphBackend
|
|
18
|
+
|
|
19
|
+
|
|
20
|
+
class WorkflowAction(BaseModel):
    """A single tracked action within a development session.

    Instances are created by :func:`track_workflow` and persisted as
    ``Memory`` nodes of type ``workflow_action``.
    """

    # Auto-generated unique identifier; becomes the stored node's "id".
    action_id: str = Field(default_factory=lambda: str(uuid4()))
    session_id: str = Field(..., description="Session ID")
    action_type: str = Field(..., description="Type of action (e.g., 'command', 'file_edit', 'search')")
    action_data: dict[str, Any] = Field(default_factory=dict, description="Action-specific data")
    # Naive local timestamp captured at model-creation time (datetime.now).
    timestamp: datetime = Field(default_factory=datetime.now)
    duration_seconds: Optional[float] = Field(None, description="Duration of action")
    success: bool = Field(default=True, description="Whether action succeeded")
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
class WorkflowSuggestion(BaseModel):
    """A suggested workflow derived from historical action sequences.

    Produced by :func:`suggest_workflow`; ``steps`` holds the ordered action
    types that make up the workflow's signature.
    """

    suggestion_id: str = Field(default_factory=lambda: str(uuid4()))
    workflow_name: str = Field(..., description="Name of suggested workflow")
    description: str = Field(..., description="Description of workflow")
    steps: list[str] = Field(default_factory=list, description="Workflow steps")
    # Fraction of past occurrences that succeeded (0.0 - 1.0).
    success_rate: float = Field(..., description="Historical success rate")
    # Heuristic match score against the caller's context (0.0 - 1.0).
    relevance_score: float = Field(..., description="Relevance to current context")
    last_used: Optional[datetime] = Field(None, description="When workflow was last used")
|
|
42
|
+
|
|
43
|
+
|
|
44
|
+
class Recommendation(BaseModel):
    """A workflow optimization recommendation.

    Produced by :func:`optimize_workflow` from signals such as slow actions,
    repeated failures, and inefficient action patterns.
    """

    recommendation_id: str = Field(default_factory=lambda: str(uuid4()))
    # One of the types emitted by optimize_workflow, e.g. 'performance',
    # 'error_pattern', 'workflow_pattern', 'productivity'.
    recommendation_type: str = Field(..., description="Type of recommendation")
    title: str = Field(..., description="Recommendation title")
    description: str = Field(..., description="Detailed description")
    impact: str = Field(..., description="Expected impact (low, medium, high)")
    evidence: list[str] = Field(default_factory=list, description="Supporting evidence")
|
|
53
|
+
|
|
54
|
+
|
|
55
|
+
class SessionState(BaseModel):
    """Snapshot of a session's state, used for continuity between sessions.

    Assembled by :func:`get_session_state` from the stored session entity
    plus derived data (open problems, heuristic next steps).
    """

    session_id: str = Field(..., description="Session ID")
    start_time: datetime = Field(..., description="Session start time")
    last_activity: datetime = Field(..., description="Last activity timestamp")
    current_task: Optional[str] = Field(None, description="Current task description")
    # Error patterns seen in this session with no recorded solution.
    open_problems: list[str] = Field(default_factory=list, description="Unresolved problems")
    next_steps: list[str] = Field(default_factory=list, description="Suggested next steps")
    context: dict[str, Any] = Field(default_factory=dict, description="Session context")
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
async def track_workflow(
    backend: GraphBackend,
    session_id: str,
    action_type: str,
    action_data: dict[str, Any],
    success: bool = True,
    duration_seconds: Optional[float] = None,
) -> str:
    """
    Track a workflow action in the current session.

    Stores the action as a ``Memory`` node, links it to its session entity
    (creating the session entity on first use), and chains it to the most
    recent prior action in the same session with a ``FOLLOWS`` relationship.

    Args:
        backend: Database backend
        session_id: Session identifier
        action_type: Type of action (e.g. 'command', 'file_edit', 'search')
        action_data: Action-specific data
        success: Whether action succeeded
        duration_seconds: Duration of action (optional)

    Returns:
        Memory ID of tracked action

    Example:
        >>> memory_id = await track_workflow(
        ...     backend,
        ...     session_id="session-123",
        ...     action_type="command",
        ...     action_data={"command": "npm test", "exit_code": 0},
        ...     success=True,
        ...     duration_seconds=12.5
        ... )
    """
    action = WorkflowAction(
        session_id=session_id,
        action_type=action_type,
        action_data=action_data,
        success=success,
        duration_seconds=duration_seconds,
    )

    # Build the human-readable content line by line.
    # BUG FIX: the previous version wrapped the whole chained f-string in a
    # single conditional expression ("... if duration_seconds else ''"),
    # which blanked the ENTIRE content whenever no duration was supplied.
    content_lines = [
        f"Session: {session_id}",
        f"Action: {action_type}",
        f"Success: {success}",
    ]
    if duration_seconds is not None:
        content_lines.append(f"Duration: {duration_seconds}s")

    # Store action as observation memory
    properties = {
        "id": action.action_id,
        "type": "workflow_action",
        "title": f"Action: {action_type}",
        "content": "\n".join(content_lines),
        "context": {
            "session_id": session_id,
            "action_type": action_type,
            "action_data": action_data,
            "success": success,
            # May be None; consumers must coalesce before comparing.
            "duration_seconds": duration_seconds,
            "timestamp": action.timestamp.isoformat(),
        },
        "created_at": datetime.now(),
        "updated_at": datetime.now(),
    }

    memory_id = await backend.store_node("Memory", properties)

    # Create or get session entity (MERGE makes this idempotent per session).
    session_entity = await backend.execute_query(
        """
        MERGE (s:Entity {id: $session_id, type: 'session'})
        ON CREATE SET s.created_at = datetime(), s.start_time = datetime()
        SET s.last_activity = datetime()
        RETURN s.id as id
        """,
        {"session_id": session_id},
    )

    if session_entity:
        # Link action to session
        await backend.store_relationship(
            memory_id,
            session_id,
            "IN_SESSION",
            {"created_at": datetime.now(), "strength": 1.0},
        )

        # Link to previous action (workflow sequence)
        previous_actions = await backend.execute_query(
            """
            MATCH (a:Memory {type: 'workflow_action'})-[:IN_SESSION]->(s:Entity {id: $session_id})
            WHERE a.id <> $current_id
            RETURN a.id as id, a.created_at as created_at
            ORDER BY a.created_at DESC
            LIMIT 1
            """,
            {"session_id": session_id, "current_id": memory_id},
        )

        if previous_actions:
            prev_id = previous_actions[0]["id"]
            await backend.store_relationship(
                memory_id,
                prev_id,
                "FOLLOWS",
                {"created_at": datetime.now(), "strength": 0.8},
            )

    return memory_id
|
|
172
|
+
|
|
173
|
+
|
|
174
|
+
async def suggest_workflow(
    backend: GraphBackend, current_context: dict[str, Any], max_suggestions: int = 5
) -> list[WorkflowSuggestion]:
    """
    Suggest workflows based on current context and past successes.

    Aggregates recent sessions into action-type signatures, keeps signatures
    seen at least twice, scores them by historical success and a keyword
    match against the current task, and returns the top matches.

    Args:
        backend: Database backend
        current_context: Current development context
        max_suggestions: Maximum number of suggestions to return

    Returns:
        List of workflow suggestions

    Example:
        >>> suggestions = await suggest_workflow(
        ...     backend,
        ...     current_context={"task": "add feature", "files": ["api.py"]},
        ...     max_suggestions=3
        ... )
        >>> for sug in suggestions:
        ...     print(f"{sug.workflow_name}: {sug.success_rate:.0%}")
    """
    # Keywords from the current task; used to boost relevance scoring below.
    keywords: list[str] = []
    if "task" in current_context:
        keywords = current_context["task"].lower().split()

    # Recent sessions containing at least three successful actions.
    sessions = await backend.execute_query(
        """
        MATCH (s:Entity {type: 'session'})
        OPTIONAL MATCH (a:Memory {type: 'workflow_action'})-[:IN_SESSION]->(s)
        WHERE a.context.success = true
        WITH s, COUNT(a) as action_count, COLLECT(a) as actions
        WHERE action_count >= 3
        RETURN s.id as session_id,
               s.start_time as start_time,
               s.last_activity as last_activity,
               actions
        ORDER BY s.last_activity DESC
        LIMIT 20
        """,
        {},
    )

    # Aggregate sessions into patterns keyed by their action-type signature.
    patterns: dict[str, dict] = {}
    for session in sessions:
        session_actions = session.get("actions", [])
        if not session_actions:
            continue

        # Signature built from the first ten action types of the session.
        signature = " -> ".join(
            act.get("context", {}).get("action_type", "unknown")
            for act in session_actions[:10]
        )

        entry = patterns.setdefault(
            signature,
            {
                "sequence": signature,
                "count": 0,
                "successes": 0,
                "last_used": None,
                "examples": [],
            },
        )
        entry["count"] += 1
        entry["successes"] += 1  # Query already filtered for successful actions
        entry["examples"].append(session["session_id"])

        # Track the most recent use of this signature across sessions.
        activity = session.get("last_activity")
        if activity and (not entry["last_used"] or activity > entry["last_used"]):
            entry["last_used"] = activity

    # Convert accumulated patterns into suggestions.
    suggestions: list[WorkflowSuggestion] = []
    for signature, data in patterns.items():
        # Only suggest patterns observed at least twice.
        if data["count"] < 2:
            continue

        rate = data["successes"] / data["count"]

        # Baseline relevance, boosted per matching task keyword
        # (placeholder heuristic; semantic matching could improve this).
        relevance = 0.5
        for word in keywords:
            if word in signature.lower():
                relevance += 0.1
        relevance = min(relevance, 1.0)

        sequence_steps = signature.split(" -> ")
        suggestions.append(
            WorkflowSuggestion(
                workflow_name=f"Workflow: {' -> '.join(sequence_steps[:3])}...",
                description=f"Common workflow seen {data['count']} times",
                steps=sequence_steps,
                success_rate=rate,
                relevance_score=relevance,
                last_used=data["last_used"],
            )
        )

    # Rank by a weighted blend of historical success and contextual relevance.
    suggestions.sort(
        key=lambda s: (s.success_rate * 0.6 + s.relevance_score * 0.4), reverse=True
    )

    return suggestions[:max_suggestions]
|
|
291
|
+
|
|
292
|
+
|
|
293
|
+
async def optimize_workflow(
    backend: GraphBackend, session_id: str
) -> list[Recommendation]:
    """
    Analyze workflow and provide optimization recommendations.

    Inspects a session's tracked actions for four signals: slow actions
    (over 30 seconds), repeated failures of the same action type,
    inefficient A -> B -> A switching patterns, and overly long sessions.

    Args:
        backend: Database backend
        session_id: Session ID to analyze

    Returns:
        List of optimization recommendations

    Example:
        >>> recommendations = await optimize_workflow(backend, "session-123")
        >>> for rec in recommendations:
        ...     print(f"{rec.title} ({rec.impact} impact)")
    """
    recommendations: list[Recommendation] = []

    # Get session actions in chronological order
    actions = await backend.execute_query(
        """
        MATCH (a:Memory {type: 'workflow_action'})-[:IN_SESSION]->(s:Entity {id: $session_id})
        RETURN a.id as id,
               a.context as context,
               a.created_at as created_at
        ORDER BY a.created_at
        """,
        {"session_id": session_id},
    )

    if not actions:
        return recommendations

    # Analyze for bottlenecks.
    # BUG FIX: track_workflow stores duration_seconds as None when no
    # duration was supplied; the .get(..., 0) default only covers a MISSING
    # key, so "None > 30" raised TypeError. Coalesce falsy values to 0.
    slow_actions = [
        a for a in actions
        if (a.get("context", {}).get("duration_seconds") or 0) > 30
    ]

    if slow_actions:
        rec = Recommendation(
            recommendation_type="performance",
            title="Slow actions detected",
            description=f"Found {len(slow_actions)} actions taking over 30 seconds. "
            "Consider optimizing these operations or running them in background.",
            impact="medium",
            evidence=[
                # Safe to index 'context' directly: the >30s filter above can
                # only pass when the context dict is present.
                f"Action {a['context'].get('action_type')} took "
                f"{a['context'].get('duration_seconds')}s"
                for a in slow_actions[:3]
            ],
        )
        recommendations.append(rec)

    # Analyze for repeated failures
    failed_actions = [a for a in actions if not a.get("context", {}).get("success", True)]

    if len(failed_actions) >= 3:
        # Count failures per action type to find repeated error patterns.
        error_types: dict[str, int] = {}
        for action in failed_actions:
            action_type = action.get("context", {}).get("action_type", "unknown")
            error_types[action_type] = error_types.get(action_type, 0) + 1

        for action_type, count in error_types.items():
            if count >= 2:
                rec = Recommendation(
                    recommendation_type="error_pattern",
                    title=f"Repeated failures in {action_type}",
                    description=f"Action type '{action_type}' failed {count} times. "
                    "This may indicate a systematic issue that needs addressing.",
                    impact="high",
                    evidence=[f"Failed {count} times in this session"],
                )
                recommendations.append(rec)

    # Analyze action sequence for inefficiencies
    action_types = [a.get("context", {}).get("action_type") for a in actions]

    # Look for A -> B -> A back-and-forth patterns; report only the first.
    for i in range(len(action_types) - 2):
        if action_types[i] == action_types[i + 2] and action_types[i] != action_types[i + 1]:
            rec = Recommendation(
                recommendation_type="workflow_pattern",
                title="Inefficient back-and-forth pattern detected",
                description=f"Detected switching between {action_types[i]} and {action_types[i+1]} multiple times. "
                "Consider batching similar operations together.",
                impact="low",
                evidence=[f"Pattern: {action_types[i]} -> {action_types[i+1]} -> {action_types[i]}"],
            )
            recommendations.append(rec)
            break  # Only report once

    # Check for long sessions without breaks
    if len(actions) > 50:
        rec = Recommendation(
            recommendation_type="productivity",
            title="Long session detected",
            description=f"This session has {len(actions)} actions. Consider taking breaks "
            "for better productivity and code quality.",
            impact="low",
            evidence=[f"Session has {len(actions)} actions"],
        )
        recommendations.append(rec)

    return recommendations
|
|
400
|
+
|
|
401
|
+
|
|
402
|
+
async def get_session_state(
    backend: GraphBackend, session_id: str
) -> Optional[SessionState]:
    """
    Get current state of a session for continuity.

    Combines the stored session entity's metadata with derived information:
    error patterns observed in the session that have no recorded solution,
    and heuristic next-step suggestions based on the most recent action.

    Args:
        backend: Database backend
        session_id: Session ID

    Returns:
        SessionState if session exists, None otherwise

    Example:
        >>> state = await get_session_state(backend, "session-123")
        >>> if state:
        ...     print(f"Current task: {state.current_task}")
        ...     print(f"Open problems: {len(state.open_problems)}")
    """
    # Get session entity.
    # NOTE(review): start_time is read from s.created_at; the session entity
    # also has s.start_time set at creation — both are written together.
    session_data = await backend.execute_query(
        """
        MATCH (s:Entity {id: $session_id, type: 'session'})
        RETURN s.created_at as start_time,
               s.last_activity as last_activity,
               s.current_task as current_task,
               s.context as context
        """,
        {"session_id": session_id},
    )

    if not session_data:
        return None

    session = session_data[0]

    # Get recent actions to determine next steps
    recent_actions = await backend.execute_query(
        """
        MATCH (a:Memory {type: 'workflow_action'})-[:IN_SESSION]->(s:Entity {id: $session_id})
        RETURN a.context as context
        ORDER BY a.created_at DESC
        LIMIT 5
        """,
        {"session_id": session_id},
    )

    # Find open problems (errors without solutions)
    open_problems_data = await backend.execute_query(
        """
        MATCH (e:Memory {type: 'error_pattern'})<-[:EXHIBITS]-(a:Memory {type: 'workflow_action'})
        WHERE (a)-[:IN_SESSION]->(:Entity {id: $session_id})
        AND NOT EXISTS {
            MATCH (e)<-[:SOLVES]-(:Memory)
        }
        RETURN DISTINCT e.title as problem
        LIMIT 10
        """,
        {"session_id": session_id},
    )

    open_problems = [p["problem"] for p in open_problems_data]

    # Suggest next steps based on the most recent action
    next_steps: list[str] = []
    if recent_actions:
        # Guard against a null context property (present-but-None).
        last_context = recent_actions[0].get("context", {}) or {}
        action_type = last_context.get("action_type")
        success = last_context.get("success", True)

        if not success:
            next_steps.append("Resolve the error from the last action")
        elif action_type == "file_edit":
            next_steps.append("Test the changes made")
        elif action_type == "command" and "test" in last_context.get("action_data", {}).get("command", ""):
            # success is guaranteed truthy here (the failure case was handled
            # by the first branch), so the old "else: fix failing tests"
            # branch was unreachable and has been removed.
            next_steps.append("Commit the changes")

    state = SessionState(
        session_id=session_id,
        # ROBUSTNESS FIX: coalesce None as well as missing keys — dict.get's
        # default only covers the missing-key case, and a null graph property
        # would otherwise fail SessionState's datetime validation.
        start_time=session.get("start_time") or datetime.now(),
        last_activity=session.get("last_activity") or datetime.now(),
        current_task=session.get("current_task"),
        open_problems=open_problems,
        next_steps=next_steps,
        context=session.get("context") or {},
    )

    return state
|
|
@@ -0,0 +1,59 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Intelligence Layer - AI-powered features for the memory server.
|
|
3
|
+
|
|
4
|
+
This module provides:
|
|
5
|
+
- Automatic entity extraction from memory content
|
|
6
|
+
- Pattern recognition and similarity matching
|
|
7
|
+
- Temporal memory tracking and version history
|
|
8
|
+
- Context-aware intelligent retrieval
|
|
9
|
+
"""
|
|
10
|
+
|
|
11
|
+
from memorygraph.intelligence.entity_extraction import (
|
|
12
|
+
EntityType,
|
|
13
|
+
Entity,
|
|
14
|
+
EntityExtractor,
|
|
15
|
+
extract_entities,
|
|
16
|
+
link_entities,
|
|
17
|
+
)
|
|
18
|
+
from memorygraph.intelligence.pattern_recognition import (
|
|
19
|
+
PatternRecognizer,
|
|
20
|
+
find_similar_problems,
|
|
21
|
+
extract_patterns,
|
|
22
|
+
suggest_patterns,
|
|
23
|
+
)
|
|
24
|
+
from memorygraph.intelligence.temporal import (
|
|
25
|
+
TemporalMemory,
|
|
26
|
+
get_memory_history,
|
|
27
|
+
get_state_at,
|
|
28
|
+
track_entity_changes,
|
|
29
|
+
)
|
|
30
|
+
from memorygraph.intelligence.context_retrieval import (
|
|
31
|
+
ContextRetriever,
|
|
32
|
+
get_context,
|
|
33
|
+
get_project_context,
|
|
34
|
+
get_session_context,
|
|
35
|
+
)
|
|
36
|
+
|
|
37
|
+
__all__ = [
|
|
38
|
+
# Entity Extraction
|
|
39
|
+
"EntityType",
|
|
40
|
+
"Entity",
|
|
41
|
+
"EntityExtractor",
|
|
42
|
+
"extract_entities",
|
|
43
|
+
"link_entities",
|
|
44
|
+
# Pattern Recognition
|
|
45
|
+
"PatternRecognizer",
|
|
46
|
+
"find_similar_problems",
|
|
47
|
+
"extract_patterns",
|
|
48
|
+
"suggest_patterns",
|
|
49
|
+
# Temporal Memory
|
|
50
|
+
"TemporalMemory",
|
|
51
|
+
"get_memory_history",
|
|
52
|
+
"get_state_at",
|
|
53
|
+
"track_entity_changes",
|
|
54
|
+
# Context Retrieval
|
|
55
|
+
"ContextRetriever",
|
|
56
|
+
"get_context",
|
|
57
|
+
"get_project_context",
|
|
58
|
+
"get_session_context",
|
|
59
|
+
]
|