remdb 0.3.0__py3-none-any.whl → 0.3.127__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of remdb might be problematic.
- rem/__init__.py +129 -2
- rem/agentic/README.md +76 -0
- rem/agentic/__init__.py +15 -0
- rem/agentic/agents/__init__.py +16 -2
- rem/agentic/agents/sse_simulator.py +502 -0
- rem/agentic/context.py +51 -25
- rem/agentic/llm_provider_models.py +301 -0
- rem/agentic/mcp/tool_wrapper.py +29 -3
- rem/agentic/otel/setup.py +93 -4
- rem/agentic/providers/phoenix.py +32 -43
- rem/agentic/providers/pydantic_ai.py +168 -24
- rem/agentic/schema.py +358 -21
- rem/agentic/tools/rem_tools.py +3 -3
- rem/api/README.md +238 -1
- rem/api/deps.py +255 -0
- rem/api/main.py +154 -37
- rem/api/mcp_router/resources.py +1 -1
- rem/api/mcp_router/server.py +26 -5
- rem/api/mcp_router/tools.py +465 -7
- rem/api/middleware/tracking.py +172 -0
- rem/api/routers/admin.py +494 -0
- rem/api/routers/auth.py +124 -0
- rem/api/routers/chat/completions.py +402 -20
- rem/api/routers/chat/models.py +88 -10
- rem/api/routers/chat/otel_utils.py +33 -0
- rem/api/routers/chat/sse_events.py +542 -0
- rem/api/routers/chat/streaming.py +642 -45
- rem/api/routers/dev.py +81 -0
- rem/api/routers/feedback.py +268 -0
- rem/api/routers/messages.py +473 -0
- rem/api/routers/models.py +78 -0
- rem/api/routers/query.py +360 -0
- rem/api/routers/shared_sessions.py +406 -0
- rem/auth/middleware.py +126 -27
- rem/cli/commands/README.md +237 -64
- rem/cli/commands/ask.py +13 -10
- rem/cli/commands/cluster.py +1808 -0
- rem/cli/commands/configure.py +5 -6
- rem/cli/commands/db.py +396 -139
- rem/cli/commands/experiments.py +293 -73
- rem/cli/commands/process.py +22 -15
- rem/cli/commands/scaffold.py +47 -0
- rem/cli/commands/schema.py +97 -50
- rem/cli/main.py +29 -6
- rem/config.py +10 -3
- rem/models/core/core_model.py +7 -1
- rem/models/core/rem_query.py +5 -2
- rem/models/entities/__init__.py +21 -0
- rem/models/entities/domain_resource.py +38 -0
- rem/models/entities/feedback.py +123 -0
- rem/models/entities/message.py +30 -1
- rem/models/entities/session.py +83 -0
- rem/models/entities/shared_session.py +180 -0
- rem/models/entities/user.py +10 -3
- rem/registry.py +373 -0
- rem/schemas/agents/rem.yaml +7 -3
- rem/services/content/providers.py +94 -140
- rem/services/content/service.py +92 -20
- rem/services/dreaming/affinity_service.py +2 -16
- rem/services/dreaming/moment_service.py +2 -15
- rem/services/embeddings/api.py +24 -17
- rem/services/embeddings/worker.py +16 -16
- rem/services/phoenix/EXPERIMENT_DESIGN.md +3 -3
- rem/services/phoenix/client.py +302 -28
- rem/services/postgres/README.md +159 -15
- rem/services/postgres/__init__.py +2 -1
- rem/services/postgres/diff_service.py +531 -0
- rem/services/postgres/pydantic_to_sqlalchemy.py +427 -129
- rem/services/postgres/repository.py +132 -0
- rem/services/postgres/schema_generator.py +291 -9
- rem/services/postgres/service.py +6 -6
- rem/services/rate_limit.py +113 -0
- rem/services/rem/README.md +14 -0
- rem/services/rem/parser.py +44 -9
- rem/services/rem/service.py +36 -2
- rem/services/session/compression.py +24 -1
- rem/services/session/reload.py +1 -1
- rem/services/user_service.py +98 -0
- rem/settings.py +313 -29
- rem/sql/background_indexes.sql +21 -16
- rem/sql/migrations/001_install.sql +387 -54
- rem/sql/migrations/002_install_models.sql +2320 -393
- rem/sql/migrations/003_optional_extensions.sql +326 -0
- rem/sql/migrations/004_cache_system.sql +548 -0
- rem/utils/__init__.py +18 -0
- rem/utils/constants.py +97 -0
- rem/utils/date_utils.py +228 -0
- rem/utils/embeddings.py +17 -4
- rem/utils/files.py +167 -0
- rem/utils/mime_types.py +158 -0
- rem/utils/model_helpers.py +156 -1
- rem/utils/schema_loader.py +282 -35
- rem/utils/sql_paths.py +146 -0
- rem/utils/sql_types.py +3 -1
- rem/utils/vision.py +9 -14
- rem/workers/README.md +14 -14
- rem/workers/__init__.py +3 -1
- rem/workers/db_listener.py +579 -0
- rem/workers/db_maintainer.py +74 -0
- rem/workers/unlogged_maintainer.py +463 -0
- {remdb-0.3.0.dist-info → remdb-0.3.127.dist-info}/METADATA +464 -289
- {remdb-0.3.0.dist-info → remdb-0.3.127.dist-info}/RECORD +104 -73
- {remdb-0.3.0.dist-info → remdb-0.3.127.dist-info}/WHEEL +1 -1
- rem/sql/002_install_models.sql +0 -1068
- rem/sql/install_models.sql +0 -1038
- {remdb-0.3.0.dist-info → remdb-0.3.127.dist-info}/entry_points.txt +0 -0
rem/services/postgres/repository.py
CHANGED

@@ -335,3 +335,135 @@ class Repository(Generic[T]):
             row = await conn.fetchrow(sql, *params)
 
             return row[0] if row else 0
+
+    async def find_paginated(
+        self,
+        filters: dict[str, Any],
+        page: int = 1,
+        page_size: int = 50,
+        order_by: str = "created_at DESC",
+        partition_by: str | None = None,
+    ) -> dict[str, Any]:
+        """
+        Find records with page-based pagination using CTE with ROW_NUMBER().
+
+        Uses a CTE with ROW_NUMBER() OVER (PARTITION BY ... ORDER BY ...) for
+        efficient pagination with total count in a single query.
+
+        Args:
+            filters: Dict of field -> value filters (AND-ed together)
+            page: Page number (1-indexed)
+            page_size: Number of records per page
+            order_by: ORDER BY clause for row numbering (default: "created_at DESC")
+            partition_by: Optional field to partition by (e.g., "user_id").
+                If None, uses global row numbering.
+
+        Returns:
+            Dict containing:
+            - data: List of model instances for the page
+            - total: Total count of records matching filters
+            - page: Current page number
+            - page_size: Records per page
+            - total_pages: Total number of pages
+            - has_next: Whether there are more pages
+            - has_previous: Whether there are previous pages
+
+        Example:
+            result = await repo.find_paginated(
+                {"tenant_id": "acme", "user_id": "alice"},
+                page=2,
+                page_size=20,
+                order_by="created_at DESC",
+                partition_by="user_id"
+            )
+            # result = {
+            #     "data": [...],
+            #     "total": 150,
+            #     "page": 2,
+            #     "page_size": 20,
+            #     "total_pages": 8,
+            #     "has_next": True,
+            #     "has_previous": True
+            # }
+        """
+        if not settings.postgres.enabled or not self.db:
+            logger.debug(f"Postgres disabled, returning empty {self.model_class.__name__} pagination")
+            return {
+                "data": [],
+                "total": 0,
+                "page": page,
+                "page_size": page_size,
+                "total_pages": 0,
+                "has_next": False,
+                "has_previous": False,
+            }
+
+        # Ensure connection
+        if not self.db.pool:
+            await self.db.connect()
+
+        # Type guard: ensure pool is not None after connect
+        if not self.db.pool:
+            raise RuntimeError("Failed to establish database connection")
+
+        # Build WHERE clause from filters
+        where_conditions = ["deleted_at IS NULL"]
+        params: list[Any] = []
+        param_idx = 1
+
+        for field, value in filters.items():
+            where_conditions.append(f"{field} = ${param_idx}")
+            params.append(value)
+            param_idx += 1
+
+        where_clause = " AND ".join(where_conditions)
+
+        # Build PARTITION BY clause
+        partition_clause = f"PARTITION BY {partition_by}" if partition_by else ""
+
+        # Build the CTE query with ROW_NUMBER() and COUNT() window functions
+        # This gives us pagination + total count in a single query
+        sql = f"""
+            WITH numbered AS (
+                SELECT *,
+                       ROW_NUMBER() OVER ({partition_clause} ORDER BY {order_by}) as _row_num,
+                       COUNT(*) OVER ({partition_clause}) as _total_count
+                FROM {self.table_name}
+                WHERE {where_clause}
+            )
+            SELECT * FROM numbered
+            WHERE _row_num > ${param_idx} AND _row_num <= ${param_idx + 1}
+            ORDER BY _row_num
+        """
+
+        # Calculate row range for the page
+        start_row = (page - 1) * page_size
+        end_row = page * page_size
+        params.extend([start_row, end_row])
+
+        async with self.db.pool.acquire() as conn:
+            rows = await conn.fetch(sql, *params)
+
+        # Extract total from first row (all rows have the same _total_count)
+        total = rows[0]["_total_count"] if rows else 0
+
+        # Remove internal columns and convert to models
+        data = []
+        for row in rows:
+            row_dict = dict(row)
+            row_dict.pop("_row_num", None)
+            row_dict.pop("_total_count", None)
+            data.append(self.model_class.model_validate(row_dict))
+
+        # Calculate pagination metadata
+        total_pages = (total + page_size - 1) // page_size if total > 0 else 0
+
+        return {
+            "data": data,
+            "total": total,
+            "page": page,
+            "page_size": page_size,
+            "total_pages": total_pages,
+            "has_next": page < total_pages,
+            "has_previous": page > 1,
+        }
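The window-function approach above fetches one page and the total match count in a single round trip. As an illustration (not part of the diff), here is roughly the SQL text `find_paginated` interpolates for a simple call; the `messages` table name and `tenant_id` filter value are hypothetical:

```python
# Sketch: the query find_paginated() would build for
# filters={"tenant_id": "acme"}, order_by="created_at DESC",
# partition_by=None, page=2, page_size=20. "messages" is a
# hypothetical table name, not taken from the diff.
table_name = "messages"
order_by = "created_at DESC"
partition_clause = ""  # no partition_by -> global row numbering
where_clause = "deleted_at IS NULL AND tenant_id = $1"

sql = f"""
    WITH numbered AS (
        SELECT *,
               ROW_NUMBER() OVER ({partition_clause} ORDER BY {order_by}) as _row_num,
               COUNT(*) OVER ({partition_clause}) as _total_count
        FROM {table_name}
        WHERE {where_clause}
    )
    SELECT * FROM numbered
    WHERE _row_num > $2 AND _row_num <= $3
    ORDER BY _row_num
"""

# Bound parameters for page=2, page_size=20: rows 21..40 come back,
# and every returned row carries _total_count for the metadata fields.
params = ["acme", 20, 40]
print(sql, params)
```

One trade-off worth noting: `COUNT(*) OVER ()` still scans every row matching the WHERE clause, so this saves a second round trip rather than making the count itself cheaper.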
rem/services/postgres/schema_generator.py
CHANGED

@@ -1,18 +1,28 @@
 """
 Schema generation utility from Pydantic models.
 
-
+Generates complete database schemas from:
+1. REM's core models (Resource, Moment, User, etc.)
+2. Models registered via rem.register_model() or rem.register_models()
+3. Models discovered from a directory scan
+
+Output includes:
 - Primary tables
 - Embeddings tables
 - KV_STORE triggers
 - Indexes (foreground and background)
 - Migrations
+- Schema table entries (for agent-like table access)
 
 Usage:
     from rem.services.postgres.schema_generator import SchemaGenerator
 
+    # Generate from registry (includes core + registered models)
     generator = SchemaGenerator()
-    schema = generator.
+    schema = await generator.generate_from_registry()
+
+    # Or generate from directory (legacy)
+    schema = await generator.generate_from_directory("src/rem/models/entities")
 
     # Write to file
     with open("src/rem/sql/schema.sql", "w") as f:
@@ -21,14 +31,192 @@ Usage:
 
 import importlib.util
 import inspect
+import json
+import uuid
 from pathlib import Path
-from typing import Type
+from typing import Any, Type
 
 from loguru import logger
 from pydantic import BaseModel
 
 from ...settings import settings
-from .
+from ...utils.sql_paths import get_package_sql_dir
+from .register_type import register_type, should_embed_field
+
+# Namespace UUID for generating deterministic UUIDs from model names
+# Using UUID5 with this namespace ensures same model always gets same UUID
+REM_SCHEMA_NAMESPACE = uuid.UUID("6ba7b810-9dad-11d1-80b4-00c04fd430c8")  # DNS namespace
+
+
+def generate_model_uuid(fully_qualified_name: str) -> uuid.UUID:
+    """
+    Generate deterministic UUID from fully qualified model name.
+
+    Uses UUID5 (SHA-1 hash) with REM namespace for reproducibility.
+    Same fully qualified name always produces same UUID.
+
+    Args:
+        fully_qualified_name: Full module path, e.g., "rem.models.entities.Resource"
+
+    Returns:
+        Deterministic UUID for this model
+    """
+    return uuid.uuid5(REM_SCHEMA_NAMESPACE, fully_qualified_name)
+
+
+def extract_model_schema_metadata(
+    model: Type[BaseModel],
+    table_name: str,
+    entity_key_field: str,
+    include_search_tool: bool = True,
+) -> dict[str, Any]:
+    """
+    Extract schema metadata from a Pydantic model for schemas table.
+
+    Args:
+        model: Pydantic model class
+        table_name: Database table name
+        entity_key_field: Field used as entity key in kv_store
+        include_search_tool: If True, add search_rem tool for querying this table
+
+    Returns:
+        Dict with schema metadata ready for schemas table insert
+    """
+    # Get fully qualified name
+    fqn = f"{model.__module__}.{model.__name__}"
+
+    # Generate deterministic UUID
+    schema_id = generate_model_uuid(fqn)
+
+    # Get JSON schema from Pydantic
+    json_schema = model.model_json_schema()
+
+    # Find embedding fields
+    embedding_fields = []
+    for field_name, field_info in model.model_fields.items():
+        if should_embed_field(field_name, field_info):
+            embedding_fields.append(field_name)
+
+    # Build description with search capability note
+    base_description = model.__doc__ or f"Schema for {model.__name__}"
+    search_note = (
+        f"\n\nThis agent can search the `{table_name}` table using the `search_rem` tool. "
+        f"Use REM query syntax: LOOKUP for exact match, FUZZY for typo-tolerant search, "
+        f"SEARCH for semantic similarity, or SQL for complex queries."
+    ) if include_search_tool else ""
+
+    # Build spec with table metadata and tools
+    # Note: default_search_table is used by create_agent to append a description
+    # suffix to the search_rem tool when loading it dynamically
+    has_embeddings = bool(embedding_fields)
+
+    spec = {
+        "type": "object",
+        "description": base_description + search_note,
+        "properties": json_schema.get("properties", {}),
+        "required": json_schema.get("required", []),
+        "json_schema_extra": {
+            "table_name": table_name,
+            "entity_key_field": entity_key_field,
+            "embedding_fields": embedding_fields,
+            "fully_qualified_name": fqn,
+            "tools": ["search_rem"] if include_search_tool else [],
+            "default_search_table": table_name,
+            "has_embeddings": has_embeddings,
+        },
+    }
+
+    # Build content (documentation)
+    content = f"""# {model.__name__}
+
+{base_description}
+
+## Overview
+
+The `{model.__name__}` entity is stored in the `{table_name}` table. Each record is uniquely
+identified by its `{entity_key_field}` field for lookups and graph traversal.
+
+## Search Capabilities
+
+This schema includes the `search_rem` tool which supports:
+- **LOOKUP**: O(1) exact match by {entity_key_field} (e.g., `LOOKUP "entity-name"`)
+- **FUZZY**: Typo-tolerant search (e.g., `FUZZY "partial" THRESHOLD 0.3`)
+- **SEARCH**: Semantic vector search on {', '.join(embedding_fields) if embedding_fields else 'content'} (e.g., `SEARCH "concept" FROM {table_name} LIMIT 10`)
+- **SQL**: Complex queries (e.g., `SELECT * FROM {table_name} WHERE ...`)
+
+## Table Info
+
+| Property | Value |
+|----------|-------|
+| Table | `{table_name}` |
+| Entity Key | `{entity_key_field}` |
+| Embedding Fields | {', '.join(f'`{f}`' for f in embedding_fields) if embedding_fields else 'None'} |
+| Tools | {', '.join(['`search_rem`'] if include_search_tool else ['None'])} |
+
+## Fields
+
+"""
+    for field_name, field_info in model.model_fields.items():
+        field_type = str(field_info.annotation) if field_info.annotation else "Any"
+        field_desc = field_info.description or ""
+        required = "Required" if field_info.is_required() else "Optional"
+        content += f"### `{field_name}`\n"
+        content += f"- **Type**: `{field_type}`\n"
+        content += f"- **{required}**\n"
+        if field_desc:
+            content += f"- {field_desc}\n"
+        content += "\n"
+
+    return {
+        "id": str(schema_id),
+        "name": model.__name__,
+        "table_name": table_name,
+        "entity_key_field": entity_key_field,
+        "embedding_fields": embedding_fields,
+        "fqn": fqn,
+        "spec": spec,
+        "content": content,
+        "category": "entity",
+    }
+
+
+def generate_schema_upsert_sql(schema_metadata: dict[str, Any]) -> str:
+    """
+    Generate SQL UPSERT statement for schemas table.
+
+    Uses ON CONFLICT DO UPDATE for idempotency.
+
+    Args:
+        schema_metadata: Dict from extract_model_schema_metadata()
+
+    Returns:
+        SQL INSERT ... ON CONFLICT statement
+    """
+    # Escape single quotes in content and spec
+    content_escaped = schema_metadata["content"].replace("'", "''")
+    spec_json = json.dumps(schema_metadata["spec"]).replace("'", "''")
+
+    sql = f"""
+-- Schema entry for {schema_metadata['name']} ({schema_metadata['table_name']})
+INSERT INTO schemas (id, tenant_id, name, content, spec, category, metadata)
+VALUES (
+    '{schema_metadata['id']}'::uuid,
+    'system',
+    '{schema_metadata['name']}',
+    '{content_escaped}',
+    '{spec_json}'::jsonb,
+    'entity',
+    '{{"table_name": "{schema_metadata['table_name']}", "entity_key_field": "{schema_metadata['entity_key_field']}", "embedding_fields": {json.dumps(schema_metadata['embedding_fields'])}, "fqn": "{schema_metadata['fqn']}"}}'::jsonb
+)
+ON CONFLICT (id) DO UPDATE SET
+    name = EXCLUDED.name,
+    content = EXCLUDED.content,
+    spec = EXCLUDED.spec,
+    category = EXCLUDED.category,
+    metadata = EXCLUDED.metadata,
+    updated_at = CURRENT_TIMESTAMP;
+"""
+    return sql.strip()
 
 
 class SchemaGenerator:
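Because the schema ids are UUID5 values, regenerating the schema never churns ids. A minimal standard-library sketch of that determinism claim (the FQN strings are just examples):

```python
import uuid

# Same constant as the diff; it equals uuid.NAMESPACE_DNS from RFC 4122.
REM_SCHEMA_NAMESPACE = uuid.UUID("6ba7b810-9dad-11d1-80b4-00c04fd430c8")

a = uuid.uuid5(REM_SCHEMA_NAMESPACE, "rem.models.entities.Resource")
b = uuid.uuid5(REM_SCHEMA_NAMESPACE, "rem.models.entities.Resource")
c = uuid.uuid5(REM_SCHEMA_NAMESPACE, "rem.models.entities.Moment")

assert a == b  # same FQN -> same UUID, on any machine, any run
assert a != c  # different FQN -> different UUID
print(a)
```

This is what makes the `ON CONFLICT (id) DO UPDATE` upsert above idempotent: re-running the generated SQL updates rows in place instead of inserting duplicates.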
@@ -47,9 +235,9 @@ class SchemaGenerator:
         Initialize schema generator.
 
         Args:
-            output_dir: Optional directory for output files (defaults to
+            output_dir: Optional directory for output files (defaults to package sql dir)
         """
-        self.output_dir = output_dir or
+        self.output_dir = output_dir or get_package_sql_dir()
         self.schemas: dict[str, dict] = {}
 
     def discover_models(self, directory: str | Path) -> dict[str, Type[BaseModel]]:
@@ -225,15 +413,76 @@ class SchemaGenerator:
             create_kv_trigger=True,
         )
 
+        # Extract schema metadata for schemas table entry
+        schema_metadata = extract_model_schema_metadata(
+            model=model,
+            table_name=table_name,
+            entity_key_field=entity_key_field,
+        )
+        schema["schema_metadata"] = schema_metadata
+
         self.schemas[table_name] = schema
         return schema
 
+    async def generate_from_registry(
+        self, output_file: str | None = None, include_core: bool = True
+    ) -> str:
+        """
+        Generate complete schema from the model registry.
+
+        Includes:
+        1. REM's core models (if include_core=True)
+        2. Models registered via rem.register_model() or rem.register_models()
+
+        Args:
+            output_file: Optional output file path (relative to output_dir)
+            include_core: If True, include REM's core models (default: True)
+
+        Returns:
+            Complete SQL schema as string
+
+        Example:
+            import rem
+            from rem.models.core import CoreModel
+
+            # Register custom model
+            @rem.register_model
+            class CustomEntity(CoreModel):
+                name: str
+
+            # Generate schema (includes core + custom)
+            generator = SchemaGenerator()
+            schema = await generator.generate_from_registry()
+        """
+        from ...registry import get_model_registry
+
+        registry = get_model_registry()
+        models = registry.get_models(include_core=include_core)
+
+        logger.info(f"Generating schema from registry: {len(models)} models")
+
+        # Generate schemas for each model
+        for model_name, ext in models.items():
+            await self.generate_schema_for_model(
+                ext.model,
+                table_name=ext.table_name,
+                entity_key_field=ext.entity_key_field,
+            )
+
+        return self._generate_sql_output(
+            source="model registry",
+            output_file=output_file,
+        )
+
     async def generate_from_directory(
         self, directory: str | Path, output_file: str | None = None
     ) -> str:
         """
         Generate complete schema from all models in a directory.
 
+        Note: For most use cases, prefer generate_from_registry() which uses
+        the model registry pattern.
+
         Args:
             directory: Path to directory with Pydantic models
             output_file: Optional output file path (relative to output_dir)
@@ -248,12 +497,31 @@ class SchemaGenerator:
         for model_name, model in models.items():
             await self.generate_schema_for_model(model)
 
-
+        return self._generate_sql_output(
+            source=f"directory: {directory}",
+            output_file=output_file,
+        )
+
+    def _generate_sql_output(
+        self, source: str, output_file: str | None = None
+    ) -> str:
+        """
+        Generate SQL output from accumulated schemas.
+
+        Args:
+            source: Description of schema source (for header comment)
+            output_file: Optional output file path (relative to output_dir)
+
+        Returns:
+            Complete SQL schema as string
+        """
+        import datetime
+
         sql_parts = [
             "-- REM Model Schema (install_models.sql)",
             "-- Generated from Pydantic models",
-            f"-- Source
-            "-- Generated at:
+            f"-- Source: {source}",
+            f"-- Generated at: {datetime.datetime.now().isoformat()}",
             "--",
             "-- DO NOT EDIT MANUALLY - Regenerate with: rem db schema generate",
             "--",
@@ -262,6 +530,7 @@ class SchemaGenerator:
             "-- 2. Embeddings tables (embeddings_<table>)",
             "-- 3. KV_STORE triggers for cache maintenance",
             "-- 4. Indexes (foreground only, background indexes separate)",
+            "-- 5. Schema table entries (for agent-like table access)",
             "",
             "-- ============================================================================",
             "-- PREREQUISITES CHECK",
@@ -307,6 +576,19 @@ class SchemaGenerator:
             sql_parts.append(schema["sql"]["kv_trigger"])
             sql_parts.append("")
 
+        # Add schema table entries (every entity table is also an "agent")
+        sql_parts.append("-- ============================================================================")
+        sql_parts.append("-- SCHEMA TABLE ENTRIES")
+        sql_parts.append("-- Every entity table gets a schemas entry for agent-like access")
+        sql_parts.append("-- ============================================================================")
+        sql_parts.append("")
+
+        for table_name, schema in self.schemas.items():
+            if "schema_metadata" in schema:
+                schema_upsert = generate_schema_upsert_sql(schema["schema_metadata"])
+                sql_parts.append(schema_upsert)
+                sql_parts.append("")
+
         # Add migration record
         sql_parts.append("-- ============================================================================")
         sql_parts.append("-- RECORD MIGRATION")
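Because `generate_schema_upsert_sql` splices values directly into the emitted SQL text (the output is a migration file, not a parameterized query), the quote-doubling step matters. A small sketch of that escaping behavior, with toy stand-in values:

```python
import json

# Toy inputs standing in for schema_metadata["content"] / ["spec"].
content = "The user's entity docs"
spec = {"description": "it's searchable"}

# SQL string literals escape a single quote by doubling it.
content_escaped = content.replace("'", "''")
spec_json = json.dumps(spec).replace("'", "''")

stmt = (
    "INSERT INTO schemas (content, spec) "
    f"VALUES ('{content_escaped}', '{spec_json}'::jsonb);"
)
print(stmt)
# INSERT INTO schemas (content, spec)
#   VALUES ('The user''s entity docs', '{"description": "it''s searchable"}'::jsonb);
```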
rem/services/postgres/service.py
CHANGED
@@ -190,19 +190,19 @@ class PostgresService:
 
     async def connect(self) -> None:
         """Establish database connection pool."""
-        logger.
+        logger.debug(f"Connecting to PostgreSQL with pool size {self.pool_size}")
         self.pool = await asyncpg.create_pool(
             self.connection_string,
             min_size=1,
             max_size=self.pool_size,
             init=self._init_connection,  # Configure JSONB codec on each connection
         )
-        logger.
+        logger.debug("PostgreSQL connection pool established")
 
         # Start embedding worker if available
         if self.embedding_worker and hasattr(self.embedding_worker, "start"):
             await self.embedding_worker.start()
-            logger.
+            logger.debug("Embedding worker started")
 
     async def disconnect(self) -> None:
         """Close database connection pool."""
@@ -211,10 +211,10 @@ class PostgresService:
         # The worker will be stopped explicitly when the application shuts down
 
         if self.pool:
-            logger.
+            logger.debug("Closing PostgreSQL connection pool")
             await self.pool.close()
             self.pool = None
-            logger.
+            logger.debug("PostgreSQL connection pool closed")
 
     async def execute(
         self,
@@ -631,7 +631,7 @@ class PostgresService:
         table_name: str,
         embedding: list[float],
         limit: int = 10,
-        min_similarity: float = 0.
+        min_similarity: float = 0.3,
         tenant_id: Optional[str] = None,
     ) -> list[dict[str, Any]]:
         """
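The last hunk gives `min_similarity` a concrete default of 0.3. As a standalone illustration of the cutoff semantics (pure Python, not the pgvector query REM actually runs):

```python
import math

def cosine_similarity(a: list[float], b: list[float]) -> float:
    """Cosine similarity of two equal-length vectors."""
    dot = sum(x * y for x, y in zip(a, b))
    norm = math.sqrt(sum(x * x for x in a)) * math.sqrt(sum(y * y for y in b))
    return dot / norm if norm else 0.0

MIN_SIMILARITY = 0.3  # the new default from the diff

query = [1.0, 0.0, 0.0]
candidates = {
    "doc-a": [0.9, 0.1, 0.0],  # nearly parallel to the query (~0.99)
    "doc-b": [0.1, 0.9, 0.2],  # nearly orthogonal (~0.11)
}

hits = {}
for key, vec in candidates.items():
    score = cosine_similarity(query, vec)
    if score >= MIN_SIMILARITY:
        hits[key] = round(score, 3)

print(hits)  # {'doc-a': 0.994} -- doc-b falls below the 0.3 floor
```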
rem/services/rate_limit.py
ADDED

@@ -0,0 +1,113 @@
+"""
+Rate Limit Service - Postgres-backed rate limiting.
+
+Implements tenant-aware, tiered rate limiting using PostgreSQL UNLOGGED tables
+for high performance. Supports monthly quotas and short-term burst limits.
+"""
+
+import random
+from datetime import datetime, timezone
+from enum import Enum
+from typing import Optional
+
+from loguru import logger
+
+from ..models.entities.user import UserTier
+from .postgres.service import PostgresService
+
+
+class RateLimitService:
+    """
+    Service for tracking and enforcing API rate limits.
+
+    Uses an UNLOGGED table `rate_limits` for performance.
+    Note: Counts in UNLOGGED tables may be lost on database crash/restart.
+    """
+
+    def __init__(self, db: PostgresService):
+        self.db = db
+
+        # Rate limits configuration
+        # Format: (limit, period_seconds)
+        # This is a simple implementation. In production, move to settings.
+        self.TIER_CONFIG = {
+            UserTier.ANONYMOUS: {"limit": 1000, "period": 3600},  # 1000/hour (for testing)
+            UserTier.FREE: {"limit": 50, "period": 2592000},  # 50/month (~30 days)
+            UserTier.BASIC: {"limit": 10000, "period": 2592000},  # 10k/month
+            UserTier.PRO: {"limit": 100000, "period": 2592000},  # 100k/month
+        }
+
+    async def check_rate_limit(
+        self,
+        tenant_id: str,
+        identifier: str,
+        tier: UserTier
+    ) -> tuple[bool, int, int]:
+        """
+        Check if request is allowed under the rate limit.
+
+        Args:
+            tenant_id: Tenant identifier
+            identifier: User ID or Anonymous ID
+            tier: User subscription tier
+
+        Returns:
+            Tuple (is_allowed, current_count, limit)
+        """
+        config = self.TIER_CONFIG.get(tier, self.TIER_CONFIG[UserTier.FREE])
+        limit = config["limit"]
+        period = config["period"]
+
+        # Construct time-window key
+        now = datetime.now(timezone.utc)
+
+        if period >= 2592000:  # Monthly
+            time_key = now.strftime("%Y-%m")
+        elif period >= 86400:  # Daily
+            time_key = now.strftime("%Y-%m-%d")
+        elif period >= 3600:  # Hourly
+            time_key = now.strftime("%Y-%m-%d-%H")
+        else:  # Minute/Second (fallback)
+            time_key = int(now.timestamp() / period)
+
+        key = f"{tenant_id}:{identifier}:{tier.value}:{time_key}"
+
+        # Calculate expiry (for cleanup)
+        expires_at = now.timestamp() + period
+
+        # Atomic UPSERT to increment counter
+        # Returns the new count
+        query = """
+            INSERT INTO rate_limits (key, count, expires_at)
+            VALUES ($1, 1, to_timestamp($2))
+            ON CONFLICT (key) DO UPDATE
+            SET count = rate_limits.count + 1
+            RETURNING count;
+        """
+
+        try:
+            count = await self.db.fetchval(query, key, expires_at)
+        except Exception as e:
+            logger.error(f"Rate limit check failed: {e}")
+            # Fail open to avoid blocking users on DB error
+            return True, 0, limit
+
+        is_allowed = count <= limit
+
+        # Probabilistic cleanup (1% chance)
+        if random.random() < 0.01:
+            await self.cleanup_expired()
+
+        return is_allowed, count, limit
+
+    async def cleanup_expired(self):
+        """Remove expired rate limit keys."""
+        try:
+            # Use a small limit to avoid locking/long queries
+            query = """
+                DELETE FROM rate_limits
+                WHERE expires_at < NOW()
+            """
+            await self.db.execute(query)
+        except Exception as e:
+            logger.warning(f"Rate limit cleanup failed: {e}")
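A hypothetical sketch of how an API layer might consume this service; the FastAPI wiring, header names, and `db` handle are assumptions for illustration, not code from this release:

```python
from fastapi import HTTPException, Request

from rem.models.entities.user import UserTier
from rem.services.rate_limit import RateLimitService

async def enforce_rate_limit(request: Request, db) -> None:
    """Reject the request with 429 once the caller's window is exhausted."""
    service = RateLimitService(db)
    allowed, count, limit = await service.check_rate_limit(
        tenant_id=request.headers.get("X-Tenant-ID", "default"),  # assumed header
        identifier=request.headers.get("X-User-ID", "anonymous"),  # assumed header
        tier=UserTier.FREE,
    )
    if not allowed:
        raise HTTPException(
            status_code=429,
            detail=f"Rate limit exceeded: {count}/{limit} requests this period",
        )
```

Note the fail-open choice in `check_rate_limit`: a database error admits the request rather than turning a DB outage into a full API outage.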
rem/services/rem/README.md
CHANGED
@@ -302,3 +302,17 @@ See `tests/integration/test_rem_query_evolution.py` for stage-based validation a
 * **Unified View**: The underlying SQL function `rem_traverse` uses a view `all_graph_edges` that unions `graph_edges` from all entity tables (`resources`, `moments`, `users`, etc.). This enables polymorphic traversal without complex joins in the application layer.
 * **KV Store**: Edge destinations (`dst`) are resolved to entity IDs using the `kv_store`. This requires that all traversable entities have an entry in the `kv_store` (handled automatically by database triggers).
 * **Iterated Retrieval**: REM is architected for multi-turn retrieval where LLMs conduct conversational database exploration. Each query informs the next, enabling emergent information discovery without requiring upfront schema knowledge.
+
+## Scaling & Architectural Decisions
+
+### 1. Hybrid Adjacency List
+REM implements a **Hybrid Adjacency List** pattern to balance strict relational guarantees with graph flexibility:
+* **Primary Storage (Source of Truth):** Standard PostgreSQL tables (`resources`, `moments`, etc.) enforce schema validation, constraints, and type safety.
+* **Graph Overlay:** Relationships are stored as "inline edges" within a JSONB column (`graph_edges`) on each entity.
+* **Performance Layer:** A denormalized `UNLOGGED` table (`kv_store`) acts as a high-speed cache, mapping human-readable keys to internal UUIDs and edges. This avoids the traditional "join bomb" of traversing normalized SQL tables while avoiding the operational complexity of a separate graph database (e.g., Neo4j).
+
+### 2. The Pareto Principle in Graph Algorithms
+We explicitly choose **Simplicity over Full-Scale Graph Analytics**.
+* **Hypothesis:** For LLM Agent workloads, 80% of the value is derived from **local context retrieval** (1-3 hops via `LOOKUP` and `TRAVERSE`).
+* **Diminishing Returns:** Global graph algorithms (PageRank, Community Detection) offer diminishing returns for real-time agentic retrieval tasks. Agents typically need to answer specific questions ("Who worked on file X?"), which is a local neighborhood problem, not a global cluster analysis problem.
+* **Future Scaling:** If deeper analysis is needed, we prefer **Graph + Vector (RAG)** approaches (using semantic similarity to find implicit links) over complex explicit graph algorithms.