remdb 0.3.0__py3-none-any.whl → 0.3.114__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of remdb might be problematic.

Files changed (98)
  1. rem/__init__.py +129 -2
  2. rem/agentic/README.md +76 -0
  3. rem/agentic/__init__.py +15 -0
  4. rem/agentic/agents/__init__.py +16 -2
  5. rem/agentic/agents/sse_simulator.py +500 -0
  6. rem/agentic/context.py +28 -22
  7. rem/agentic/llm_provider_models.py +301 -0
  8. rem/agentic/otel/setup.py +92 -4
  9. rem/agentic/providers/phoenix.py +32 -43
  10. rem/agentic/providers/pydantic_ai.py +142 -22
  11. rem/agentic/schema.py +358 -21
  12. rem/agentic/tools/rem_tools.py +3 -3
  13. rem/api/README.md +238 -1
  14. rem/api/deps.py +255 -0
  15. rem/api/main.py +151 -37
  16. rem/api/mcp_router/resources.py +1 -1
  17. rem/api/mcp_router/server.py +17 -2
  18. rem/api/mcp_router/tools.py +143 -7
  19. rem/api/middleware/tracking.py +172 -0
  20. rem/api/routers/admin.py +277 -0
  21. rem/api/routers/auth.py +124 -0
  22. rem/api/routers/chat/completions.py +152 -16
  23. rem/api/routers/chat/models.py +7 -3
  24. rem/api/routers/chat/sse_events.py +526 -0
  25. rem/api/routers/chat/streaming.py +608 -45
  26. rem/api/routers/dev.py +81 -0
  27. rem/api/routers/feedback.py +148 -0
  28. rem/api/routers/messages.py +473 -0
  29. rem/api/routers/models.py +78 -0
  30. rem/api/routers/query.py +357 -0
  31. rem/api/routers/shared_sessions.py +406 -0
  32. rem/auth/middleware.py +126 -27
  33. rem/cli/commands/README.md +201 -70
  34. rem/cli/commands/ask.py +13 -10
  35. rem/cli/commands/cluster.py +1359 -0
  36. rem/cli/commands/configure.py +4 -3
  37. rem/cli/commands/db.py +350 -137
  38. rem/cli/commands/experiments.py +76 -72
  39. rem/cli/commands/process.py +22 -15
  40. rem/cli/commands/scaffold.py +47 -0
  41. rem/cli/commands/schema.py +95 -49
  42. rem/cli/main.py +29 -6
  43. rem/config.py +2 -2
  44. rem/models/core/core_model.py +7 -1
  45. rem/models/core/rem_query.py +5 -2
  46. rem/models/entities/__init__.py +21 -0
  47. rem/models/entities/domain_resource.py +38 -0
  48. rem/models/entities/feedback.py +123 -0
  49. rem/models/entities/message.py +30 -1
  50. rem/models/entities/session.py +83 -0
  51. rem/models/entities/shared_session.py +180 -0
  52. rem/models/entities/user.py +10 -3
  53. rem/registry.py +373 -0
  54. rem/schemas/agents/rem.yaml +7 -3
  55. rem/services/content/providers.py +94 -140
  56. rem/services/content/service.py +92 -20
  57. rem/services/dreaming/affinity_service.py +2 -16
  58. rem/services/dreaming/moment_service.py +2 -15
  59. rem/services/embeddings/api.py +24 -17
  60. rem/services/embeddings/worker.py +16 -16
  61. rem/services/phoenix/EXPERIMENT_DESIGN.md +3 -3
  62. rem/services/phoenix/client.py +252 -19
  63. rem/services/postgres/README.md +159 -15
  64. rem/services/postgres/__init__.py +2 -1
  65. rem/services/postgres/diff_service.py +426 -0
  66. rem/services/postgres/pydantic_to_sqlalchemy.py +427 -129
  67. rem/services/postgres/repository.py +132 -0
  68. rem/services/postgres/schema_generator.py +86 -5
  69. rem/services/postgres/service.py +6 -6
  70. rem/services/rate_limit.py +113 -0
  71. rem/services/rem/README.md +14 -0
  72. rem/services/rem/parser.py +44 -9
  73. rem/services/rem/service.py +36 -2
  74. rem/services/session/compression.py +17 -1
  75. rem/services/session/reload.py +1 -1
  76. rem/services/user_service.py +98 -0
  77. rem/settings.py +169 -17
  78. rem/sql/background_indexes.sql +21 -16
  79. rem/sql/migrations/001_install.sql +231 -54
  80. rem/sql/migrations/002_install_models.sql +457 -393
  81. rem/sql/migrations/003_optional_extensions.sql +326 -0
  82. rem/utils/constants.py +97 -0
  83. rem/utils/date_utils.py +228 -0
  84. rem/utils/embeddings.py +17 -4
  85. rem/utils/files.py +167 -0
  86. rem/utils/mime_types.py +158 -0
  87. rem/utils/model_helpers.py +156 -1
  88. rem/utils/schema_loader.py +191 -35
  89. rem/utils/sql_types.py +3 -1
  90. rem/utils/vision.py +9 -14
  91. rem/workers/README.md +14 -14
  92. rem/workers/db_maintainer.py +74 -0
  93. {remdb-0.3.0.dist-info → remdb-0.3.114.dist-info}/METADATA +303 -164
  94. {remdb-0.3.0.dist-info → remdb-0.3.114.dist-info}/RECORD +96 -70
  95. {remdb-0.3.0.dist-info → remdb-0.3.114.dist-info}/WHEEL +1 -1
  96. rem/sql/002_install_models.sql +0 -1068
  97. rem/sql/install_models.sql +0 -1038
  98. {remdb-0.3.0.dist-info → remdb-0.3.114.dist-info}/entry_points.txt +0 -0
@@ -335,3 +335,135 @@ class Repository(Generic[T]):
             row = await conn.fetchrow(sql, *params)

             return row[0] if row else 0
+
+    async def find_paginated(
+        self,
+        filters: dict[str, Any],
+        page: int = 1,
+        page_size: int = 50,
+        order_by: str = "created_at DESC",
+        partition_by: str | None = None,
+    ) -> dict[str, Any]:
+        """
+        Find records with page-based pagination using CTE with ROW_NUMBER().
+
+        Uses a CTE with ROW_NUMBER() OVER (PARTITION BY ... ORDER BY ...) for
+        efficient pagination with total count in a single query.
+
+        Args:
+            filters: Dict of field -> value filters (AND-ed together)
+            page: Page number (1-indexed)
+            page_size: Number of records per page
+            order_by: ORDER BY clause for row numbering (default: "created_at DESC")
+            partition_by: Optional field to partition by (e.g., "user_id").
+                If None, uses global row numbering.
+
+        Returns:
+            Dict containing:
+            - data: List of model instances for the page
+            - total: Total count of records matching filters
+            - page: Current page number
+            - page_size: Records per page
+            - total_pages: Total number of pages
+            - has_next: Whether there are more pages
+            - has_previous: Whether there are previous pages
+
+        Example:
+            result = await repo.find_paginated(
+                {"tenant_id": "acme", "user_id": "alice"},
+                page=2,
+                page_size=20,
+                order_by="created_at DESC",
+                partition_by="user_id"
+            )
+            # result = {
+            #     "data": [...],
+            #     "total": 150,
+            #     "page": 2,
+            #     "page_size": 20,
+            #     "total_pages": 8,
+            #     "has_next": True,
+            #     "has_previous": True
+            # }
+        """
+        if not settings.postgres.enabled or not self.db:
+            logger.debug(f"Postgres disabled, returning empty {self.model_class.__name__} pagination")
+            return {
+                "data": [],
+                "total": 0,
+                "page": page,
+                "page_size": page_size,
+                "total_pages": 0,
+                "has_next": False,
+                "has_previous": False,
+            }
+
+        # Ensure connection
+        if not self.db.pool:
+            await self.db.connect()
+
+        # Type guard: ensure pool is not None after connect
+        if not self.db.pool:
+            raise RuntimeError("Failed to establish database connection")
+
+        # Build WHERE clause from filters
+        where_conditions = ["deleted_at IS NULL"]
+        params: list[Any] = []
+        param_idx = 1
+
+        for field, value in filters.items():
+            where_conditions.append(f"{field} = ${param_idx}")
+            params.append(value)
+            param_idx += 1
+
+        where_clause = " AND ".join(where_conditions)
+
+        # Build PARTITION BY clause
+        partition_clause = f"PARTITION BY {partition_by}" if partition_by else ""
+
+        # Build the CTE query with ROW_NUMBER() and COUNT() window functions
+        # This gives us pagination + total count in a single query
+        sql = f"""
+            WITH numbered AS (
+                SELECT *,
+                       ROW_NUMBER() OVER ({partition_clause} ORDER BY {order_by}) as _row_num,
+                       COUNT(*) OVER ({partition_clause}) as _total_count
+                FROM {self.table_name}
+                WHERE {where_clause}
+            )
+            SELECT * FROM numbered
+            WHERE _row_num > ${param_idx} AND _row_num <= ${param_idx + 1}
+            ORDER BY _row_num
+        """
+
+        # Calculate row range for the page
+        start_row = (page - 1) * page_size
+        end_row = page * page_size
+        params.extend([start_row, end_row])
+
+        async with self.db.pool.acquire() as conn:
+            rows = await conn.fetch(sql, *params)
+
+        # Extract total from first row (all rows have the same _total_count)
+        total = rows[0]["_total_count"] if rows else 0
+
+        # Remove internal columns and convert to models
+        data = []
+        for row in rows:
+            row_dict = dict(row)
+            row_dict.pop("_row_num", None)
+            row_dict.pop("_total_count", None)
+            data.append(self.model_class.model_validate(row_dict))
+
+        # Calculate pagination metadata
+        total_pages = (total + page_size - 1) // page_size if total > 0 else 0
+
+        return {
+            "data": data,
+            "total": total,
+            "page": page,
+            "page_size": page_size,
+            "total_pages": total_pages,
+            "has_next": page < total_pages,
+            "has_previous": page > 1,
+        }
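For orientation, here is a minimal sketch of how a caller might consume `find_paginated` from an API handler. The route function, the `Repository(Message, db)` constructor shape, and the response layout are assumptions for illustration, not code from the package; the point is that the pagination metadata arrives with the data, so no separate COUNT(*) round trip is needed.

```python
# Hypothetical caller sketch (not package code): page a user's messages.
from rem.models.entities import Message
from rem.services.postgres import PostgresService, Repository


async def list_messages(db: PostgresService, tenant_id: str, user_id: str, page: int = 1):
    repo = Repository(Message, db)  # assumed constructor signature
    result = await repo.find_paginated(
        {"tenant_id": tenant_id, "user_id": user_id},
        page=page,
        page_size=20,
        order_by="created_at DESC",
        partition_by="user_id",
    )
    # Metadata comes back alongside the rows, so next/previous links can be
    # built without issuing a second COUNT(*) query.
    return {
        "items": [m.model_dump() for m in result["data"]],
        "page": result["page"],
        "total_pages": result["total_pages"],
        "has_next": result["has_next"],
    }
```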
@@ -1,7 +1,12 @@
 """
 Schema generation utility from Pydantic models.
 
-Scans a directory of Pydantic models and generates complete database schemas including:
+Generates complete database schemas from:
+1. REM's core models (Resource, Moment, User, etc.)
+2. Models registered via rem.register_model() or rem.register_models()
+3. Models discovered from a directory scan
+
+Output includes:
 - Primary tables
 - Embeddings tables
 - KV_STORE triggers
@@ -11,8 +16,12 @@ Scans a directory of Pydantic models and generates complete database schemas inc
 Usage:
     from rem.services.postgres.schema_generator import SchemaGenerator
 
+    # Generate from registry (includes core + registered models)
     generator = SchemaGenerator()
-    schema = generator.generate_from_directory("src/rem/models/entities")
+    schema = await generator.generate_from_registry()
+
+    # Or generate from directory (legacy)
+    schema = await generator.generate_from_directory("src/rem/models/entities")
 
     # Write to file
     with open("src/rem/sql/schema.sql", "w") as f:
@@ -228,12 +237,65 @@ class SchemaGenerator:
         self.schemas[table_name] = schema
         return schema
 
+    async def generate_from_registry(
+        self, output_file: str | None = None, include_core: bool = True
+    ) -> str:
+        """
+        Generate complete schema from the model registry.
+
+        Includes:
+        1. REM's core models (if include_core=True)
+        2. Models registered via rem.register_model() or rem.register_models()
+
+        Args:
+            output_file: Optional output file path (relative to output_dir)
+            include_core: If True, include REM's core models (default: True)
+
+        Returns:
+            Complete SQL schema as string
+
+        Example:
+            import rem
+            from rem.models.core import CoreModel
+
+            # Register custom model
+            @rem.register_model
+            class CustomEntity(CoreModel):
+                name: str
+
+            # Generate schema (includes core + custom)
+            generator = SchemaGenerator()
+            schema = await generator.generate_from_registry()
+        """
+        from ...registry import get_model_registry
+
+        registry = get_model_registry()
+        models = registry.get_models(include_core=include_core)
+
+        logger.info(f"Generating schema from registry: {len(models)} models")
+
+        # Generate schemas for each model
+        for model_name, ext in models.items():
+            await self.generate_schema_for_model(
+                ext.model,
+                table_name=ext.table_name,
+                entity_key_field=ext.entity_key_field,
+            )
+
+        return self._generate_sql_output(
+            source="model registry",
+            output_file=output_file,
+        )
+
     async def generate_from_directory(
         self, directory: str | Path, output_file: str | None = None
     ) -> str:
         """
         Generate complete schema from all models in a directory.
 
+        Note: For most use cases, prefer generate_from_registry() which uses
+        the model registry pattern.
+
         Args:
             directory: Path to directory with Pydantic models
             output_file: Optional output file path (relative to output_dir)
@@ -248,12 +310,31 @@ class SchemaGenerator:
         for model_name, model in models.items():
             await self.generate_schema_for_model(model)
 
-        # Combine into single SQL file
+        return self._generate_sql_output(
+            source=f"directory: {directory}",
+            output_file=output_file,
+        )
+
+    def _generate_sql_output(
+        self, source: str, output_file: str | None = None
+    ) -> str:
+        """
+        Generate SQL output from accumulated schemas.
+
+        Args:
+            source: Description of schema source (for header comment)
+            output_file: Optional output file path (relative to output_dir)
+
+        Returns:
+            Complete SQL schema as string
+        """
+        import datetime
+
         sql_parts = [
             "-- REM Model Schema (install_models.sql)",
             "-- Generated from Pydantic models",
-            f"-- Source directory: {directory}",
-            "-- Generated at: " + __import__("datetime").datetime.now().isoformat(),
+            f"-- Source: {source}",
+            f"-- Generated at: {datetime.datetime.now().isoformat()}",
             "--",
             "-- DO NOT EDIT MANUALLY - Regenerate with: rem db schema generate",
             "--",
@@ -190,19 +190,19 @@ class PostgresService:
 
     async def connect(self) -> None:
         """Establish database connection pool."""
-        logger.info(f"Connecting to PostgreSQL with pool size {self.pool_size}")
+        logger.debug(f"Connecting to PostgreSQL with pool size {self.pool_size}")
         self.pool = await asyncpg.create_pool(
             self.connection_string,
             min_size=1,
             max_size=self.pool_size,
             init=self._init_connection,  # Configure JSONB codec on each connection
         )
-        logger.info("PostgreSQL connection pool established")
+        logger.debug("PostgreSQL connection pool established")
 
         # Start embedding worker if available
         if self.embedding_worker and hasattr(self.embedding_worker, "start"):
             await self.embedding_worker.start()
-            logger.info("Embedding worker started")
+            logger.debug("Embedding worker started")
 
     async def disconnect(self) -> None:
         """Close database connection pool."""
@@ -211,10 +211,10 @@ class PostgresService:
         # The worker will be stopped explicitly when the application shuts down
 
         if self.pool:
-            logger.info("Closing PostgreSQL connection pool")
+            logger.debug("Closing PostgreSQL connection pool")
             await self.pool.close()
             self.pool = None
-            logger.info("PostgreSQL connection pool closed")
+            logger.debug("PostgreSQL connection pool closed")
 
     async def execute(
         self,
@@ -631,7 +631,7 @@ class PostgresService:
         table_name: str,
         embedding: list[float],
         limit: int = 10,
-        min_similarity: float = 0.7,
+        min_similarity: float = 0.3,
         tenant_id: Optional[str] = None,
     ) -> list[dict[str, Any]]:
         """
@@ -0,0 +1,113 @@
+"""
+Rate Limit Service - Postgres-backed rate limiting.
+
+Implements tenant-aware, tiered rate limiting using PostgreSQL UNLOGGED tables
+for high performance. Supports monthly quotas and short-term burst limits.
+"""
+
+import random
+from datetime import datetime, timezone
+from enum import Enum
+from typing import Optional
+
+from loguru import logger
+
+from ..models.entities.user import UserTier
+from .postgres.service import PostgresService
+
+
+class RateLimitService:
+    """
+    Service for tracking and enforcing API rate limits.
+
+    Uses an UNLOGGED table `rate_limits` for performance.
+    Note: Counts in UNLOGGED tables may be lost on database crash/restart.
+    """
+
+    def __init__(self, db: PostgresService):
+        self.db = db
+
+        # Rate limits configuration
+        # Format: (limit, period_seconds)
+        # This is a simple implementation. In production, move to settings.
+        self.TIER_CONFIG = {
+            UserTier.ANONYMOUS: {"limit": 1000, "period": 3600},  # 1000/hour (for testing)
+            UserTier.FREE: {"limit": 50, "period": 2592000},  # 50/month (~30 days)
+            UserTier.BASIC: {"limit": 10000, "period": 2592000},  # 10k/month
+            UserTier.PRO: {"limit": 100000, "period": 2592000},  # 100k/month
+        }
+
+    async def check_rate_limit(
+        self,
+        tenant_id: str,
+        identifier: str,
+        tier: UserTier
+    ) -> tuple[bool, int, int]:
+        """
+        Check if request is allowed under the rate limit.
+
+        Args:
+            tenant_id: Tenant identifier
+            identifier: User ID or Anonymous ID
+            tier: User subscription tier
+
+        Returns:
+            Tuple (is_allowed, current_count, limit)
+        """
+        config = self.TIER_CONFIG.get(tier, self.TIER_CONFIG[UserTier.FREE])
+        limit = config["limit"]
+        period = config["period"]
+
+        # Construct time-window key
+        now = datetime.now(timezone.utc)
+
+        if period >= 2592000:  # Monthly
+            time_key = now.strftime("%Y-%m")
+        elif period >= 86400:  # Daily
+            time_key = now.strftime("%Y-%m-%d")
+        elif period >= 3600:  # Hourly
+            time_key = now.strftime("%Y-%m-%d-%H")
+        else:  # Minute/Second (fallback)
+            time_key = int(now.timestamp() / period)
+
+        key = f"{tenant_id}:{identifier}:{tier.value}:{time_key}"
+
+        # Calculate expiry (for cleanup)
+        expires_at = now.timestamp() + period
+
+        # Atomic UPSERT to increment counter
+        # Returns the new count
+        query = """
+            INSERT INTO rate_limits (key, count, expires_at)
+            VALUES ($1, 1, to_timestamp($2))
+            ON CONFLICT (key) DO UPDATE
+            SET count = rate_limits.count + 1
+            RETURNING count;
+        """
+
+        try:
+            count = await self.db.fetchval(query, key, expires_at)
+        except Exception as e:
+            logger.error(f"Rate limit check failed: {e}")
+            # Fail open to avoid blocking users on DB error
+            return True, 0, limit
+
+        is_allowed = count <= limit
+
+        # Probabilistic cleanup (1% chance)
+        if random.random() < 0.01:
+            await self.cleanup_expired()
+
+        return is_allowed, count, limit
+
+    async def cleanup_expired(self):
+        """Remove expired rate limit keys."""
+        try:
+            # Use a small limit to avoid locking/long queries
+            query = """
+                DELETE FROM rate_limits
+                WHERE expires_at < NOW()
+            """
+            await self.db.execute(query)
+        except Exception as e:
+            logger.warning(f"Rate limit cleanup failed: {e}")
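A minimal sketch of how this service might be wired into a request path. The enforcement function, the 429 response, and the header names below are illustrative assumptions, not the package's actual middleware (which lives in rem/api/middleware/tracking.py and rem/auth/middleware.py per the file list).

```python
# Hypothetical enforcement sketch: reject a request once check_rate_limit() says no.
from fastapi import HTTPException

from rem.models.entities.user import UserTier
from rem.services.rate_limit import RateLimitService


async def enforce_rate_limit(
    rate_limiter: RateLimitService, tenant_id: str, identifier: str, tier: UserTier
) -> None:
    allowed, count, limit = await rate_limiter.check_rate_limit(tenant_id, identifier, tier)
    if not allowed:
        # Surface the quota so clients can back off; header names are illustrative.
        raise HTTPException(
            status_code=429,
            detail="Rate limit exceeded",
            headers={
                "X-RateLimit-Limit": str(limit),
                "X-RateLimit-Remaining": str(max(limit - count, 0)),
            },
        )
```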
@@ -302,3 +302,17 @@ See `tests/integration/test_rem_query_evolution.py` for stage-based validation a
 * **Unified View**: The underlying SQL function `rem_traverse` uses a view `all_graph_edges` that unions `graph_edges` from all entity tables (`resources`, `moments`, `users`, etc.). This enables polymorphic traversal without complex joins in the application layer.
 * **KV Store**: Edge destinations (`dst`) are resolved to entity IDs using the `kv_store`. This requires that all traversable entities have an entry in the `kv_store` (handled automatically by database triggers).
 * **Iterated Retrieval**: REM is architected for multi-turn retrieval where LLMs conduct conversational database exploration. Each query informs the next, enabling emergent information discovery without requiring upfront schema knowledge.
+
+## Scaling & Architectural Decisions
+
+### 1. Hybrid Adjacency List
+REM implements a **Hybrid Adjacency List** pattern to balance strict relational guarantees with graph flexibility:
+* **Primary Storage (Source of Truth):** Standard PostgreSQL tables (`resources`, `moments`, etc.) enforce schema validation, constraints, and type safety.
+* **Graph Overlay:** Relationships are stored as "inline edges" within a JSONB column (`graph_edges`) on each entity.
+* **Performance Layer:** A denormalized `UNLOGGED` table (`kv_store`) acts as a high-speed cache, mapping human-readable keys to internal UUIDs and edges. This avoids the traditional "join bomb" of traversing normalized SQL tables while avoiding the operational complexity of a separate graph database (e.g., Neo4j).
+
+### 2. The Pareto Principle in Graph Algorithms
+We explicitly choose **Simplicity over Full-Scale Graph Analytics**.
+* **Hypothesis:** For LLM Agent workloads, 80% of the value is derived from **local context retrieval** (1-3 hops via `LOOKUP` and `TRAVERSE`).
+* **Diminishing Returns:** Global graph algorithms (PageRank, Community Detection) offer diminishing returns for real-time agentic retrieval tasks. Agents typically need to answer specific questions ("Who worked on file X?"), which is a local neighborhood problem, not a global cluster analysis problem.
+* **Future Scaling:** If deeper analysis is needed, we prefer **Graph + Vector (RAG)** approaches (using semantic similarity to find implicit links) over complex explicit graph algorithms.
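To make the hybrid adjacency list concrete, here is an illustrative data shape for one entity and its lookup entry. Only `graph_edges`, `dst`, and `entity_key` are names taken from the diff and README; the remaining keys and the `kv_store` columns shown are guesses for illustration, not the actual schema.

```python
# Illustrative only: one resource row with inline edges, plus its kv_store cache entry.
resource_row = {
    "id": "7f3c...-uuid",
    "entity_key": "quarterly-report-2024",
    "graph_edges": [  # inline edges stored in a JSONB column on the entity
        {"dst": "sarah-chen", "type": "authored_by"},      # field names assumed
        {"dst": "acme-finance", "type": "belongs_to"},
    ],
}

kv_store_row = {  # UNLOGGED lookup cache: human-readable key -> internal id (columns assumed)
    "entity_key": "quarterly-report-2024",
    "entity_id": resource_row["id"],
    "table_name": "resources",
}

# A LOOKUP resolves entity_key via kv_store; a TRAVERSE then follows graph_edges
# for 1-3 hops without multi-table joins, which is the "local context" case above.
```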
@@ -50,9 +50,36 @@ class RemQueryParser:
         params: Dict[str, Any] = {}
         positional_args: List[str] = []
 
-        # Process remaining tokens
-        for token in tokens[1:]:
-            if "=" in token:
+        # For SQL queries, preserve the raw query (keywords like LIMIT are SQL keywords)
+        if query_type == QueryType.SQL:
+            # Everything after "SQL" is the raw SQL query
+            raw_sql = query_string[3:].strip()  # Skip "SQL" prefix
+            params["raw_query"] = raw_sql
+            return query_type, params
+
+        # Process remaining tokens, handling REM keywords
+        i = 1
+        while i < len(tokens):
+            token = tokens[i]
+            token_upper = token.upper()
+
+            # Handle REM keywords that take a value
+            if token_upper in ("LIMIT", "DEPTH", "THRESHOLD", "TYPE", "FROM", "WITH"):
+                if i + 1 < len(tokens):
+                    keyword_map = {
+                        "LIMIT": "limit",
+                        "DEPTH": "max_depth",
+                        "THRESHOLD": "threshold",
+                        "TYPE": "edge_types",
+                        "FROM": "initial_query",
+                        "WITH": "initial_query",
+                    }
+                    key = keyword_map[token_upper]
+                    value = tokens[i + 1]
+                    params[key] = self._convert_value(key, value)
+                    i += 2
+                    continue
+            elif "=" in token:
                 # It's a keyword argument
                 key, value = token.split("=", 1)
                 # Handle parameter aliases
@@ -61,6 +88,7 @@ class RemQueryParser:
             else:
                 # It's a positional argument part
                 positional_args.append(token)
+            i += 1
 
         # Map positional arguments to specific fields based on QueryType
         self._map_positional_args(query_type, positional_args, params)
@@ -133,13 +161,20 @@ class RemQueryParser:
             params["query_text"] = combined_value
 
         elif query_type == QueryType.SEARCH:
-            params["query_text"] = combined_value
+            # SEARCH expects: SEARCH <table> <query_text> [LIMIT n]
+            # First positional arg is table name, rest is query text
+            if len(positional_args) >= 2:
+                params["table_name"] = positional_args[0]
+                params["query_text"] = " ".join(positional_args[1:])
+            elif len(positional_args) == 1:
+                # Could be table name or query text - assume query text if no table
+                params["query_text"] = positional_args[0]
+            # If no positional args, params stays empty
 
         elif query_type == QueryType.TRAVERSE:
             params["initial_query"] = combined_value
 
-        # SQL typically requires named arguments (table=...), but if we supported
-        # SQL SELECT * FROM ..., we might handle it differently.
-        # For now, RemService expects table=...
-        # If there are positional args for SQL, we might ignore or raise,
-        # but current service doesn't use them.
+        elif query_type == QueryType.SQL:
+            # SQL with positional args means "SQL SELECT * FROM ..." form
+            # Treat the combined positional args as the raw SQL query
+            params["raw_query"] = combined_value
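Taken together, the keyword handling and the SEARCH positional mapping would decompose a query string roughly as sketched below. The public `parse()` method name and the return shape are assumptions for this sketch; the parameter names (`table_name`, `query_text`, `limit`) are the ones introduced in the diff, and exact quoting and numeric coercion depend on the tokenizer and `_convert_value`.

```python
# Illustrative parse of a SEARCH query under the new keyword handling (method name assumed).
from rem.services.rem.parser import RemQueryParser

parser = RemQueryParser()
query_type, params = parser.parse('SEARCH resources "quarterly report" LIMIT 5')

# Expected roughly:
#   query_type -> QueryType.SEARCH
#   params     -> {"table_name": "resources", "query_text": "quarterly report", "limit": 5}
```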
@@ -13,6 +13,31 @@ Design:
 - All queries pushed down to Postgres for performance
 - Model schema inspection for validation only
 - Exceptions for missing fields/embeddings
+
+TODO: Staged Plan Execution
+- Implement execute_staged_plan() method for multi-stage query execution
+- Each stage can be:
+  1. Static query (query field): Execute REM dialect directly
+  2. Dynamic query (intent field): LLM interprets intent + previous results to build query
+- Flow for dynamic stages:
+  1. Gather results from depends_on stages (from previous_results or current execution)
+  2. Pass intent + previous results to LLM (like ask_rem but with context)
+  3. LLM generates REM query based on what it learned from previous stages
+  4. Execute generated query
+  5. Store results in stage_results for client to use in continuation
+- Multi-turn continuation:
+  - Client passes previous_results back from response's stage_results
+  - Client sets resume_from_stage to skip already-executed stages
+  - Server uses previous_results as context for depends_on lookups
+- Use cases:
+  - LOOKUP "Sarah" → intent: "find her team members" (LLM sees Sarah's graph_edges, builds TRAVERSE)
+  - SEARCH "API docs" → intent: "get authors" (LLM extracts author refs, builds LOOKUP)
+  - Complex graph exploration with LLM-driven navigation
+- API: POST /api/v1/query with:
+  - mode="staged-plan"
+  - plan=[{stage, query|intent, name, depends_on}]
+  - previous_results=[{stage, name, query_executed, results, count}] (for continuation)
+  - resume_from_stage=N (to skip completed stages)
 """
 
 from typing import Any
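The staged-plan flow above is still a TODO, so the request schema is not final; the sketch below only illustrates the shape the docstring describes (endpoint and field names come from the TODO, the concrete values are made up).

```python
# Illustrative staged-plan request body for POST /api/v1/query (planned, not yet implemented).
staged_plan_request = {
    "mode": "staged-plan",
    "plan": [
        {"stage": 1, "name": "find_person", "query": 'LOOKUP "sarah-chen"'},
        {
            "stage": 2,
            "name": "team_members",
            "intent": "find her team members",  # dynamic stage: LLM builds a TRAVERSE
            "depends_on": ["find_person"],
        },
    ],
    # On a follow-up call, the client would echo stage_results back and resume:
    # "previous_results": [{"stage": 1, "name": "find_person",
    #                       "query_executed": "...", "results": [...], "count": 1}],
    # "resume_from_stage": 2,
}
```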
@@ -309,17 +334,26 @@ class RemService:
         )
 
         # Execute vector search via rem_search() PostgreSQL function
+        min_sim = params.min_similarity if params.min_similarity is not None else 0.3
+        limit = params.limit or 10
         query_params = get_search_params(
             query_embedding,
             table_name,
             field_name,
             tenant_id,
             provider,
-            params.min_similarity or 0.7,
-            params.limit or 10,
+            min_sim,
+            limit,
             tenant_id,  # Use tenant_id (query.user_id) as user_id
         )
+        logger.debug(
+            f"SEARCH params: table={table_name}, field={field_name}, "
+            f"tenant_id={tenant_id}, provider={provider}, "
+            f"min_similarity={min_sim}, limit={limit}, "
+            f"embedding_dims={len(query_embedding)}"
+        )
         results = await self.db.execute(SEARCH_QUERY, query_params)
+        logger.debug(f"SEARCH results: {len(results)} rows")
 
         return {
             "query_type": "SEARCH",
@@ -14,6 +14,21 @@ from typing import Any
 
 from loguru import logger
 
+# Max length for entity keys (kv_store.entity_key is varchar(255))
+MAX_ENTITY_KEY_LENGTH = 255
+
+
+def truncate_key(key: str, max_length: int = MAX_ENTITY_KEY_LENGTH) -> str:
+    """Truncate a key to max length, preserving useful suffix if possible."""
+    if len(key) <= max_length:
+        return key
+    # Keep first part and add hash suffix for uniqueness
+    import hashlib
+    hash_suffix = hashlib.md5(key.encode()).hexdigest()[:8]
+    truncated = key[:max_length - 9] + "-" + hash_suffix
+    logger.warning(f"Truncated key from {len(key)} to {len(truncated)} chars: {key[:50]}...")
+    return truncated
+
 from rem.models.entities import Message
 from rem.services.postgres import PostgresService, Repository
 from rem.settings import settings
@@ -151,7 +166,8 @@ class SessionMessageStore:
             return f"msg-{message_index}"
 
         # Create entity key for REM LOOKUP: session-{session_id}-msg-{index}
-        entity_key = f"session-{session_id}-msg-{message_index}"
+        # Truncate to avoid exceeding kv_store.entity_key varchar(255) limit
+        entity_key = truncate_key(f"session-{session_id}-msg-{message_index}")
 
         # Create Message entity for assistant response
         msg = Message(
@@ -65,7 +65,7 @@ async def reload_session(
         session_id=session_id, user_id=user_id, decompress=decompress_messages
     )
 
-    logger.info(
+    logger.debug(
         f"Reloaded {len(messages)} messages for session {session_id} "
         f"(decompressed={decompress_messages})"
    )