memorygraphmcp-0.11.7-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- memorygraph/__init__.py +50 -0
- memorygraph/__main__.py +12 -0
- memorygraph/advanced_tools.py +509 -0
- memorygraph/analytics/__init__.py +46 -0
- memorygraph/analytics/advanced_queries.py +727 -0
- memorygraph/backends/__init__.py +21 -0
- memorygraph/backends/base.py +179 -0
- memorygraph/backends/cloud.py +75 -0
- memorygraph/backends/cloud_backend.py +858 -0
- memorygraph/backends/factory.py +577 -0
- memorygraph/backends/falkordb_backend.py +749 -0
- memorygraph/backends/falkordblite_backend.py +746 -0
- memorygraph/backends/ladybugdb_backend.py +242 -0
- memorygraph/backends/memgraph_backend.py +327 -0
- memorygraph/backends/neo4j_backend.py +298 -0
- memorygraph/backends/sqlite_fallback.py +463 -0
- memorygraph/backends/turso.py +448 -0
- memorygraph/cli.py +743 -0
- memorygraph/cloud_database.py +297 -0
- memorygraph/config.py +295 -0
- memorygraph/database.py +933 -0
- memorygraph/graph_analytics.py +631 -0
- memorygraph/integration/__init__.py +69 -0
- memorygraph/integration/context_capture.py +426 -0
- memorygraph/integration/project_analysis.py +583 -0
- memorygraph/integration/workflow_tracking.py +492 -0
- memorygraph/intelligence/__init__.py +59 -0
- memorygraph/intelligence/context_retrieval.py +447 -0
- memorygraph/intelligence/entity_extraction.py +386 -0
- memorygraph/intelligence/pattern_recognition.py +420 -0
- memorygraph/intelligence/temporal.py +374 -0
- memorygraph/migration/__init__.py +27 -0
- memorygraph/migration/manager.py +579 -0
- memorygraph/migration/models.py +142 -0
- memorygraph/migration/scripts/__init__.py +17 -0
- memorygraph/migration/scripts/bitemporal_migration.py +595 -0
- memorygraph/migration/scripts/multitenancy_migration.py +452 -0
- memorygraph/migration_tools_module.py +146 -0
- memorygraph/models.py +684 -0
- memorygraph/proactive/__init__.py +46 -0
- memorygraph/proactive/outcome_learning.py +444 -0
- memorygraph/proactive/predictive.py +410 -0
- memorygraph/proactive/session_briefing.py +399 -0
- memorygraph/relationships.py +668 -0
- memorygraph/server.py +883 -0
- memorygraph/sqlite_database.py +1876 -0
- memorygraph/tools/__init__.py +59 -0
- memorygraph/tools/activity_tools.py +262 -0
- memorygraph/tools/memory_tools.py +315 -0
- memorygraph/tools/migration_tools.py +181 -0
- memorygraph/tools/relationship_tools.py +147 -0
- memorygraph/tools/search_tools.py +406 -0
- memorygraph/tools/temporal_tools.py +339 -0
- memorygraph/utils/__init__.py +10 -0
- memorygraph/utils/context_extractor.py +429 -0
- memorygraph/utils/error_handling.py +151 -0
- memorygraph/utils/export_import.py +425 -0
- memorygraph/utils/graph_algorithms.py +200 -0
- memorygraph/utils/pagination.py +149 -0
- memorygraph/utils/project_detection.py +133 -0
- memorygraphmcp-0.11.7.dist-info/METADATA +970 -0
- memorygraphmcp-0.11.7.dist-info/RECORD +65 -0
- memorygraphmcp-0.11.7.dist-info/WHEEL +4 -0
- memorygraphmcp-0.11.7.dist-info/entry_points.txt +2 -0
- memorygraphmcp-0.11.7.dist-info/licenses/LICENSE +21 -0
memorygraph/intelligence/context_retrieval.py
@@ -0,0 +1,447 @@
"""
Context-Aware Retrieval - Intelligent context retrieval beyond keyword search.

This module provides smart context assembly, relevance ranking,
and token-limited context formatting.
"""

import logging
import re
from typing import Optional
from datetime import datetime, timedelta

logger = logging.getLogger(__name__)


class ContextRetriever:
    """Retrieves and ranks context intelligently with relevance-based scoring."""

    def __init__(self, backend):
        """
        Initialize context retriever with database backend.

        Args:
            backend: Database backend instance
        """
        self.backend = backend

    async def get_context(
        self, query: str, max_tokens: int = 4000, project: Optional[str] = None
    ) -> dict:
        """
        Get intelligent context for a query with smart ranking and token limiting.

        Args:
            query: User query to get context for
            max_tokens: Maximum tokens to include in context
            project: Optional project filter

        Returns:
            Dictionary with formatted context and source memory IDs
        """
        from memorygraph.intelligence.entity_extraction import extract_entities

        # Extract entities from query for matching
        entities = extract_entities(query)
        entity_texts = [e.text for e in entities if e.confidence > 0.6]

        # Extract keywords for fallback matching
        keywords = self._extract_keywords(query)

        # Build search query
        search_query = """
        // Find memories matching entities or keywords
        MATCH (m:Memory)
        WHERE (
            // Match by entities
            any(entity IN $entities WHERE
                exists((m)-[:MENTIONS]->(:Entity {text: entity}))
            )
            OR
            // Match by keywords
            any(keyword IN $keywords WHERE
                toLower(m.content) CONTAINS keyword OR
                toLower(m.title) CONTAINS keyword
            )
        )
        // Apply project filter if provided
        WITH m
        WHERE $project IS NULL OR $project IN m.tags

        // Calculate relevance score
        WITH m,
            // Entity match score
            size([entity IN $entities WHERE
                exists((m)-[:MENTIONS]->(:Entity {text: entity}))]) as entity_matches,
            // Keyword match score
            size([keyword IN $keywords WHERE
                toLower(m.content) CONTAINS keyword OR
                toLower(m.title) CONTAINS keyword]) as keyword_matches,
            // Recency score (newer is better)
            duration.between(m.created_at, datetime()).days as age_days

        WITH m, entity_matches, keyword_matches, age_days,
            // Combined relevance score
            toFloat(entity_matches * 3 + keyword_matches * 2) /
            (1.0 + age_days / 30.0) as relevance_score

        // Get related memories via relationships
        OPTIONAL MATCH (m)-[r]->(related:Memory)
        WHERE type(r) IN ['SOLVES', 'BUILDS_ON', 'REQUIRES', 'RELATED_TO']
        WITH m, relevance_score,
            collect(DISTINCT {
                id: related.id,
                title: related.title,
                rel_type: type(r),
                rel_strength: coalesce(r.strength, 0.5)
            }) as related_memories

        // Order by relevance and limit results
        ORDER BY relevance_score DESC, m.created_at DESC
        LIMIT 20

        RETURN m.id as id,
            m.title as title,
            m.content as content,
            m.type as memory_type,
            m.tags as tags,
            m.created_at as created_at,
            relevance_score,
            entity_matches,
            keyword_matches,
            related_memories
        """

        params = {
            "entities": entity_texts,
            "keywords": keywords,
            "project": project,
        }

        try:
            results = await self.backend.execute_query(search_query, params)

            # Format context within token limit
            context_parts = []
            source_memories = []
            estimated_tokens = 0

            for record in results:
                memory_summary = self._format_memory(record)
                memory_tokens = self._estimate_tokens(memory_summary)

                if estimated_tokens + memory_tokens > max_tokens:
                    break

                context_parts.append(memory_summary)
                source_memories.append({
                    "id": record["id"],
                    "title": record.get("title"),
                    "relevance": float(record.get("relevance_score", 0)),
                })
                estimated_tokens += memory_tokens

            # Build structured context
            context = "\n\n".join(context_parts)

            return {
                "context": context,
                "source_memories": source_memories,
                "total_memories": len(source_memories),
                "estimated_tokens": estimated_tokens,
                "query_entities": entity_texts,
                "query_keywords": keywords,
            }

        except Exception as e:
            logger.error(f"Error retrieving context for query '{query}': {e}")
            return {
                "context": "",
                "source_memories": [],
                "error": str(e),
            }

    async def get_project_context(self, project: str) -> dict:
        """
        Get comprehensive overview of a project.

        Args:
            project: Project name or identifier

        Returns:
            Project context with recent activities, decisions, and issues
        """
        query = """
        MATCH (m:Memory)
        WHERE $project IN m.tags

        WITH m
        ORDER BY m.created_at DESC

        WITH collect(m) as all_memories

        // Get recent memories
        WITH all_memories,
            [m IN all_memories WHERE m.created_at >= datetime() - duration({days: 7})][..10] as recent,
            [m IN all_memories WHERE m.type = 'decision'][..5] as decisions,
            [m IN all_memories WHERE m.type = 'problem' AND
                NOT exists((m)<-[:SOLVES]-(:Memory))][..5] as open_problems,
            [m IN all_memories WHERE m.type = 'solution'][..5] as solutions

        RETURN {
            total_memories: size(all_memories),
            recent_activity: [m IN recent | {
                id: m.id,
                title: m.title,
                type: m.type,
                created_at: m.created_at
            }],
            decisions: [m IN decisions | {
                id: m.id,
                title: m.title,
                created_at: m.created_at
            }],
            open_problems: [m IN open_problems | {
                id: m.id,
                title: m.title,
                created_at: m.created_at
            }],
            solutions: [m IN solutions | {
                id: m.id,
                title: m.title,
                created_at: m.created_at
            }]
        } as project_summary
        """

        params = {"project": project}

        try:
            results = await self.backend.execute_query(query, params)

            if results and "project_summary" in results[0]:
                return results[0]["project_summary"]
            else:
                return {
                    "total_memories": 0,
                    "recent_activity": [],
                    "decisions": [],
                    "open_problems": [],
                    "solutions": [],
                }

        except Exception as e:
            logger.error(f"Error getting project context for '{project}': {e}")
            return {"error": str(e)}

    async def get_session_context(
        self, hours_back: int = 24, limit: int = 10
    ) -> dict:
        """
        Get recent session context from the last N hours.

        Args:
            hours_back: How many hours of history to include
            limit: Maximum number of memories to return

        Returns:
            Recent session context
        """
        query = """
        MATCH (m:Memory)
        WHERE m.created_at >= datetime() - duration({hours: $hours_back})

        WITH m
        ORDER BY m.created_at DESC
        LIMIT $limit

        // Get related patterns
        OPTIONAL MATCH (m)-[:MENTIONS]->(e:Entity)
        WITH m, collect(DISTINCT e.text) as entities

        RETURN m.id as id,
            m.title as title,
            m.content as content,
            m.type as memory_type,
            m.created_at as created_at,
            entities
        ORDER BY m.created_at DESC
        """

        params = {
            "hours_back": hours_back,
            "limit": limit,
        }

        try:
            results = await self.backend.execute_query(query, params)

            memories = []
            all_entities = set()

            for record in results:
                memories.append({
                    "id": record["id"],
                    "title": record.get("title"),
                    "type": record.get("memory_type"),
                    "created_at": record.get("created_at"),
                    "entities": record.get("entities", []),
                })
                all_entities.update(record.get("entities", []))

            return {
                "recent_memories": memories,
                "total_count": len(memories),
                "time_range_hours": hours_back,
                "active_entities": list(all_entities),
            }

        except Exception as e:
            logger.error(f"Error getting session context: {e}")
            return {"error": str(e)}

    def _format_memory(self, record: dict) -> str:
        """
        Format a memory record into readable context.

        Args:
            record: Memory record from database

        Returns:
            Formatted string representation
        """
        title = record.get("title", "Untitled")
        memory_type = record.get("memory_type", "unknown")
        content = record.get("content", "")
        relevance = record.get("relevance_score", 0)

        # Truncate content if too long
        if len(content) > 500:
            content = content[:497] + "..."

        formatted = f"## {title} ({memory_type})\n"
        if relevance > 0:
            formatted += f"Relevance: {relevance:.2f}\n"
        formatted += f"{content}"

        # Add related memories if present
        related = record.get("related_memories", [])
        if related:
            formatted += "\n\nRelated: "
            related_titles = [r.get("title", "Untitled") for r in related[:3]]
            formatted += ", ".join(related_titles)

        return formatted

    def _estimate_tokens(self, text: str) -> int:
        """
        Estimate token count for text (rough approximation).

        Args:
            text: Text to estimate tokens for

        Returns:
            Estimated token count
        """
        # Rough estimate: ~4 characters per token
        return len(text) // 4

    def _extract_keywords(self, text: str) -> list[str]:
        """
        Extract keywords from query text.

        Args:
            text: Text to extract keywords from

        Returns:
            List of keywords
        """
        # Remove common stop words
        stop_words = {
            "the", "a", "an", "and", "or", "but", "in", "on", "at",
            "to", "for", "of", "with", "by", "from", "is", "are",
            "was", "were", "be", "been", "being", "have", "has", "had",
            "do", "does", "did", "will", "would", "should", "could", "may",
            "might", "can", "this", "that", "these", "those", "what", "which",
            "who", "when", "where", "why", "how",
        }

        # Tokenize and filter
        words = re.findall(r"\b[a-z]{3,}\b", text.lower())
        keywords = [w for w in words if w not in stop_words]

        # Return unique keywords
        return list(set(keywords))


# Convenience functions


async def get_context(
    backend, query: str, max_tokens: int = 4000, project: Optional[str] = None
) -> dict:
    """
    Get intelligent context for a query.

    Args:
        backend: Database backend instance
        query: User query
        max_tokens: Maximum tokens in context
        project: Optional project filter

    Returns:
        Context dictionary with formatted text and source memories

    Example:
        >>> context = await get_context(
        ...     backend,
        ...     "How do I implement authentication?",
        ...     max_tokens=2000
        ... )
        >>> print(context["context"])
    """
    retriever = ContextRetriever(backend)
    return await retriever.get_context(query, max_tokens, project)


async def get_project_context(backend, project: str) -> dict:
    """
    Get comprehensive project overview.

    Args:
        backend: Database backend instance
        project: Project name

    Returns:
        Project context with activities and issues

    Example:
        >>> context = await get_project_context(backend, "my-app")
        >>> print(f"Total memories: {context['total_memories']}")
        >>> print(f"Open problems: {len(context['open_problems'])}")
    """
    retriever = ContextRetriever(backend)
    return await retriever.get_project_context(project)


async def get_session_context(
    backend, hours_back: int = 24, limit: int = 10
) -> dict:
    """
    Get recent session context.

    Args:
        backend: Database backend instance
        hours_back: Hours of history to include
        limit: Maximum memories to return

    Returns:
        Recent session context

    Example:
        >>> context = await get_session_context(backend, hours_back=12)
        >>> for memory in context["recent_memories"]:
        ...     print(f"{memory['title']} - {memory['type']}")
    """
    retriever = ContextRetriever(backend)
    return await retriever.get_session_context(hours_back, limit)