remdb 0.3.181__py3-none-any.whl → 0.3.202__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of remdb might be problematic.
- rem/agentic/context.py +101 -0
- rem/agentic/context_builder.py +12 -2
- rem/api/main.py +1 -1
- rem/api/mcp_router/server.py +4 -0
- rem/api/mcp_router/tools.py +395 -159
- rem/api/routers/auth.py +43 -1
- rem/api/routers/chat/completions.py +51 -9
- rem/api/routers/chat/sse_events.py +2 -2
- rem/api/routers/chat/streaming.py +146 -21
- rem/api/routers/messages.py +96 -23
- rem/auth/jwt.py +19 -4
- rem/auth/middleware.py +42 -28
- rem/cli/README.md +62 -0
- rem/cli/commands/db.py +33 -19
- rem/cli/commands/process.py +171 -43
- rem/models/entities/ontology.py +18 -20
- rem/services/content/service.py +18 -5
- rem/services/postgres/__init__.py +28 -3
- rem/services/postgres/diff_service.py +57 -5
- rem/services/postgres/programmable_diff_service.py +635 -0
- rem/services/postgres/pydantic_to_sqlalchemy.py +2 -2
- rem/services/postgres/register_type.py +11 -10
- rem/services/session/__init__.py +7 -1
- rem/services/session/compression.py +42 -2
- rem/services/session/pydantic_messages.py +210 -0
- rem/sql/migrations/001_install.sql +125 -7
- rem/sql/migrations/002_install_models.sql +136 -126
- rem/sql/migrations/004_cache_system.sql +7 -275
- rem/utils/schema_loader.py +6 -6
- {remdb-0.3.181.dist-info → remdb-0.3.202.dist-info}/METADATA +1 -1
- {remdb-0.3.181.dist-info → remdb-0.3.202.dist-info}/RECORD +33 -31
- {remdb-0.3.181.dist-info → remdb-0.3.202.dist-info}/WHEEL +0 -0
- {remdb-0.3.181.dist-info → remdb-0.3.202.dist-info}/entry_points.txt +0 -0
rem/services/content/service.py
CHANGED
@@ -274,7 +274,7 @@ class ContentService:
     async def ingest_file(
         self,
         file_uri: str,
-        user_id: str,
+        user_id: str | None = None,
         category: str | None = None,
         tags: list[str] | None = None,
         is_local_server: bool = False,
@@ -283,6 +283,10 @@ class ContentService:
         """
         Complete file ingestion pipeline: read → store → parse → chunk → embed.

+        **IMPORTANT: Data is PUBLIC by default (user_id=None).**
+        This is correct for shared knowledge bases (ontologies, procedures, reference data).
+        Private user-scoped data is rarely needed - only set user_id for truly personal content.
+
         **CENTRALIZED INGESTION**: This is the single entry point for all file ingestion
         in REM. It handles:

@@ -319,7 +323,9 @@ class ContentService:

         Args:
             file_uri: Source file location (local path, s3://, or https://)
-            user_id: User identifier for data
+            user_id: User identifier for PRIVATE data only. Default None = PUBLIC/shared.
+                Leave as None for shared knowledge bases, ontologies, reference data.
+                Only set for truly private user-specific content.
             category: Optional category tag (document, code, audio, etc.)
             tags: Optional list of tags
             is_local_server: True if running as local/stdio MCP server
@@ -347,12 +353,19 @@ class ContentService:

         Example:
             >>> service = ContentService()
+            >>> # PUBLIC data (default) - visible to all users
             >>> result = await service.ingest_file(
-            ...     file_uri="s3://bucket/
-            ...
-            ...     category="legal"
+            ...     file_uri="s3://bucket/procedure.pdf",
+            ...     category="medical"
             ... )
             >>> print(f"Created {result['resources_created']} searchable chunks")
+            >>>
+            >>> # PRIVATE data (rare) - only for user-specific content
+            >>> result = await service.ingest_file(
+            ...     file_uri="s3://bucket/personal-notes.pdf",
+            ...     user_id="user-123",  # Only this user can access
+            ...     category="personal"
+            ... )
         """
         from pathlib import Path
         from uuid import uuid4
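
To make the new default concrete, here is a minimal caller-side sketch of the changed signature. It assumes ContentService is importable from rem.services.content.service as shown above; the bucket URIs are placeholders and the 'resources_created' key is taken from the docstring example, not verified beyond it.

import asyncio

from rem.services.content.service import ContentService


async def main() -> None:
    service = ContentService()

    # Default call: user_id is omitted, so the ingested chunks are PUBLIC/shared.
    shared = await service.ingest_file(
        file_uri="s3://bucket/procedure.pdf",
        category="medical",
    )
    print(f"Created {shared['resources_created']} searchable chunks")

    # Explicit user_id: chunks are scoped to that user only (the rare case).
    private = await service.ingest_file(
        file_uri="s3://bucket/personal-notes.pdf",
        user_id="user-123",
        category="personal",
    )
    print(f"Created {private['resources_created']} private chunks")


if __name__ == "__main__":
    asyncio.run(main())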

rem/services/postgres/__init__.py
CHANGED
@@ -3,22 +3,47 @@ PostgreSQL service for CloudNativePG database operations.
 """

 from .diff_service import DiffService, SchemaDiff
+from .programmable_diff_service import (
+    DiffResult,
+    ObjectDiff,
+    ObjectType,
+    ProgrammableDiffService,
+)
 from .repository import Repository
 from .service import PostgresService


+_postgres_instance: PostgresService | None = None
+
+
 def get_postgres_service() -> PostgresService | None:
     """
-    Get PostgresService instance.
+    Get PostgresService singleton instance.

     Returns None if Postgres is disabled.
+    Uses singleton pattern to prevent connection pool exhaustion.
     """
+    global _postgres_instance
+
     from ...settings import settings

     if not settings.postgres.enabled:
         return None

-
+    if _postgres_instance is None:
+        _postgres_instance = PostgresService()
+
+    return _postgres_instance


-__all__ = [
+__all__ = [
+    "DiffResult",
+    "DiffService",
+    "ObjectDiff",
+    "ObjectType",
+    "PostgresService",
+    "ProgrammableDiffService",
+    "Repository",
+    "SchemaDiff",
+    "get_postgres_service",
+]
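
The new module-level singleton means every lookup shares one PostgresService (and therefore one connection pool). A rough sketch of the expected behavior, assuming Postgres is enabled in settings and that rem.services.postgres exports get_postgres_service as listed in __all__ above:

from rem.services.postgres import get_postgres_service

# Both calls return the same PostgresService instance (or None if Postgres
# is disabled), so only one connection pool is ever created.
first = get_postgres_service()
second = get_postgres_service()

if first is not None:
    assert first is second  # singleton: no second pool is opened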

rem/services/postgres/diff_service.py
CHANGED
@@ -5,12 +5,17 @@ Uses Alembic autogenerate to detect differences between:
 - Target schema (derived from Pydantic models)
 - Current database schema

+Also compares programmable objects (functions, triggers, views) which
+Alembic does not track.
+
 This enables:
 1. Local development: See what would change before applying migrations
 2. CI validation: Detect drift between code and database (--check mode)
 3. Migration generation: Create incremental migration files
 """

+import asyncio
+import re
 from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Optional
@@ -51,11 +56,14 @@ class SchemaDiff:
     sql: str = ""
     upgrade_ops: Optional[ops.UpgradeOps] = None
     filtered_count: int = 0  # Number of operations filtered out by strategy
+    # Programmable objects (functions, triggers, views)
+    programmable_summary: list[str] = field(default_factory=list)
+    programmable_sql: str = ""

     @property
     def change_count(self) -> int:
         """Total number of detected changes."""
-        return len(self.summary)
+        return len(self.summary) + len(self.programmable_summary)


 class DiffService:
@@ -127,10 +135,13 @@ class DiffService:
         # These are now generated in pydantic_to_sqlalchemy
         return True

-    def compute_diff(self) -> SchemaDiff:
+    def compute_diff(self, include_programmable: bool = True) -> SchemaDiff:
         """
         Compare Pydantic models against database and return differences.

+        Args:
+            include_programmable: If True, also diff functions/triggers/views
+
         Returns:
             SchemaDiff with detected changes
         """
@@ -167,21 +178,62 @@ class DiffService:
         for op in filtered_ops:
             summary.extend(self._describe_operation(op))

-        has_changes = len(summary) > 0
-
         # Generate SQL if there are changes
         sql = ""
-        if
+        if summary and upgrade_ops:
             sql = self._render_sql(upgrade_ops, engine)

+        # Programmable objects diff (functions, triggers, views)
+        programmable_summary = []
+        programmable_sql = ""
+        if include_programmable:
+            prog_summary, prog_sql = self._compute_programmable_diff()
+            programmable_summary = prog_summary
+            programmable_sql = prog_sql
+
+        has_changes = len(summary) > 0 or len(programmable_summary) > 0
+
         return SchemaDiff(
             has_changes=has_changes,
             summary=summary,
             sql=sql,
             upgrade_ops=upgrade_ops,
             filtered_count=filtered_count,
+            programmable_summary=programmable_summary,
+            programmable_sql=programmable_sql,
         )

+    def _compute_programmable_diff(self) -> tuple[list[str], str]:
+        """
+        Compute diff for programmable objects (functions, triggers, views).
+
+        Returns:
+            Tuple of (summary_lines, sync_sql)
+        """
+        from .programmable_diff_service import ProgrammableDiffService
+
+        service = ProgrammableDiffService()
+
+        # Run async diff in sync context
+        try:
+            loop = asyncio.get_event_loop()
+        except RuntimeError:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+        result = loop.run_until_complete(service.compute_diff())
+
+        summary = []
+        for diff in result.diffs:
+            if diff.status == "missing":
+                summary.append(f"+ {diff.object_type.value.upper()} {diff.name} (missing)")
+            elif diff.status == "different":
+                summary.append(f"~ {diff.object_type.value.upper()} {diff.name} (different)")
+            elif diff.status == "extra":
+                summary.append(f"- {diff.object_type.value.upper()} {diff.name} (extra in db)")
+
+        return summary, result.sync_sql
+
     def _filter_operations(self, operations: list) -> tuple[list, int]:
         """
         Filter operations based on migration strategy.