remdb 0.3.180__py3-none-any.whl → 0.3.230__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (54)
  1. rem/agentic/README.md +36 -2
  2. rem/agentic/context.py +173 -0
  3. rem/agentic/context_builder.py +12 -2
  4. rem/agentic/mcp/tool_wrapper.py +2 -2
  5. rem/agentic/providers/pydantic_ai.py +1 -1
  6. rem/agentic/schema.py +2 -2
  7. rem/api/main.py +1 -1
  8. rem/api/mcp_router/server.py +4 -0
  9. rem/api/mcp_router/tools.py +542 -166
  10. rem/api/routers/admin.py +30 -4
  11. rem/api/routers/auth.py +106 -10
  12. rem/api/routers/chat/child_streaming.py +379 -0
  13. rem/api/routers/chat/completions.py +74 -37
  14. rem/api/routers/chat/sse_events.py +7 -3
  15. rem/api/routers/chat/streaming.py +352 -257
  16. rem/api/routers/chat/streaming_utils.py +327 -0
  17. rem/api/routers/common.py +18 -0
  18. rem/api/routers/dev.py +7 -1
  19. rem/api/routers/feedback.py +9 -1
  20. rem/api/routers/messages.py +176 -38
  21. rem/api/routers/models.py +9 -1
  22. rem/api/routers/query.py +12 -1
  23. rem/api/routers/shared_sessions.py +16 -0
  24. rem/auth/jwt.py +19 -4
  25. rem/auth/middleware.py +42 -28
  26. rem/cli/README.md +62 -0
  27. rem/cli/commands/ask.py +61 -81
  28. rem/cli/commands/db.py +55 -31
  29. rem/cli/commands/process.py +171 -43
  30. rem/models/entities/ontology.py +18 -20
  31. rem/schemas/agents/rem.yaml +1 -1
  32. rem/services/content/service.py +18 -5
  33. rem/services/embeddings/worker.py +26 -12
  34. rem/services/postgres/__init__.py +28 -3
  35. rem/services/postgres/diff_service.py +57 -5
  36. rem/services/postgres/programmable_diff_service.py +635 -0
  37. rem/services/postgres/pydantic_to_sqlalchemy.py +2 -2
  38. rem/services/postgres/register_type.py +11 -10
  39. rem/services/postgres/repository.py +39 -29
  40. rem/services/postgres/schema_generator.py +5 -5
  41. rem/services/postgres/sql_builder.py +6 -5
  42. rem/services/session/__init__.py +8 -1
  43. rem/services/session/compression.py +40 -2
  44. rem/services/session/pydantic_messages.py +292 -0
  45. rem/settings.py +28 -0
  46. rem/sql/migrations/001_install.sql +125 -7
  47. rem/sql/migrations/002_install_models.sql +159 -149
  48. rem/sql/migrations/004_cache_system.sql +7 -275
  49. rem/sql/migrations/migrate_session_id_to_uuid.sql +45 -0
  50. rem/utils/schema_loader.py +79 -51
  51. {remdb-0.3.180.dist-info → remdb-0.3.230.dist-info}/METADATA +2 -2
  52. {remdb-0.3.180.dist-info → remdb-0.3.230.dist-info}/RECORD +54 -48
  53. {remdb-0.3.180.dist-info → remdb-0.3.230.dist-info}/WHEEL +0 -0
  54. {remdb-0.3.180.dist-info → remdb-0.3.230.dist-info}/entry_points.txt +0 -0
rem/models/entities/ontology.py
@@ -103,32 +103,30 @@ class Ontology(CoreModel):
         tags=["cv", "engineering"]
     )
 
-    # Direct-loaded: Medical knowledge base from git
-    disorder_ontology = Ontology(
-        name="panic-disorder",
-        uri="git://bwolfson-siggie/Siggy-MVP/ontology/disorders/anxiety/panic-disorder.md",
-        content="# Panic Disorder\\n\\nPanic disorder is characterized by...",
+    # Direct-loaded: Knowledge base from git
+    api_docs = Ontology(
+        name="rest-api-guide",
+        uri="git://example-org/docs/api/rest-api-guide.md",
+        content="# REST API Guide\\n\\nThis guide covers RESTful API design...",
         extracted_data={
-            "type": "disorder",
-            "category": "anxiety",
-            "icd10": "F41.0",
-            "dsm5_criteria": ["A", "B", "C", "D"],
+            "type": "documentation",
+            "category": "api",
+            "version": "2.0",
         },
-        tags=["disorder", "anxiety", "dsm5"]
+        tags=["api", "rest", "documentation"]
     )
 
-    # Direct-loaded: Clinical procedure from git
-    scid_node = Ontology(
-        name="scid-5-f1",
-        uri="git://bwolfson-siggie/Siggy-MVP/ontology/procedures/scid-5/module-f/scid-5-f1.md",
-        content="# scid-5-f1: Panic Attack Screening\\n\\n...",
+    # Direct-loaded: Technical spec from git
+    config_spec = Ontology(
+        name="config-schema",
+        uri="git://example-org/docs/specs/config-schema.md",
+        content="# Configuration Schema\\n\\nThis document defines...",
         extracted_data={
-            "type": "procedure",
-            "module": "F",
-            "section": "Panic Disorder",
-            "dsm5_criterion": "Panic Attack Specifier",
+            "type": "specification",
+            "format": "yaml",
+            "version": "1.0",
         },
-        tags=["scid-5", "procedure", "anxiety"]
+        tags=["config", "schema", "specification"]
     )
     """
 
rem/schemas/agents/rem.yaml
@@ -124,7 +124,7 @@ json_schema_extra:
 
 # Explicit resource declarations for reference data
 resources:
-  - uri: rem://schemas
+  - uri: rem://agents
     name: Agent Schemas List
    description: List all available agent schemas in the system
  - uri: rem://status
rem/services/content/service.py
@@ -274,7 +274,7 @@ class ContentService:
     async def ingest_file(
         self,
         file_uri: str,
-        user_id: str,
+        user_id: str | None = None,
         category: str | None = None,
         tags: list[str] | None = None,
         is_local_server: bool = False,
@@ -283,6 +283,10 @@ class ContentService:
         """
         Complete file ingestion pipeline: read → store → parse → chunk → embed.
 
+        **IMPORTANT: Data is PUBLIC by default (user_id=None).**
+        This is correct for shared knowledge bases (ontologies, procedures, reference data).
+        Private user-scoped data is rarely needed - only set user_id for truly personal content.
+
         **CENTRALIZED INGESTION**: This is the single entry point for all file ingestion
         in REM. It handles:
 
@@ -319,7 +323,9 @@ class ContentService:
 
         Args:
             file_uri: Source file location (local path, s3://, or https://)
-            user_id: User identifier for data isolation and ownership
+            user_id: User identifier for PRIVATE data only. Default None = PUBLIC/shared.
+                Leave as None for shared knowledge bases, ontologies, reference data.
+                Only set for truly private user-specific content.
             category: Optional category tag (document, code, audio, etc.)
             tags: Optional list of tags
             is_local_server: True if running as local/stdio MCP server
@@ -347,12 +353,19 @@ class ContentService:
 
         Example:
             >>> service = ContentService()
+            >>> # PUBLIC data (default) - visible to all users
             >>> result = await service.ingest_file(
-            ...     file_uri="s3://bucket/contract.pdf",
-            ...     user_id="user-123",
-            ...     category="legal"
+            ...     file_uri="s3://bucket/procedure.pdf",
+            ...     category="medical"
             ... )
             >>> print(f"Created {result['resources_created']} searchable chunks")
+            >>>
+            >>> # PRIVATE data (rare) - only for user-specific content
+            >>> result = await service.ingest_file(
+            ...     file_uri="s3://bucket/personal-notes.pdf",
+            ...     user_id="user-123",  # Only this user can access
+            ...     category="personal"
+            ... )
         """
         from pathlib import Path
         from uuid import uuid4
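The public-by-default contract above is easiest to see from the read side. The sketch below is hypothetical: it assumes a user_id column on the stored rows and an asyncpg-style connection, and only illustrates the visibility rule the docstring describes (NULL user_id means shared); the actual filter REM applies at query time is not part of this diff.

    # Hypothetical read-side filter (illustrative names, not REM's API):
    # public rows (user_id IS NULL) are visible to everyone, private rows
    # only to their owner.
    async def visible_resources(conn, requesting_user_id):
        sql = """
            SELECT id, content
            FROM resources
            WHERE user_id IS NULL   -- shared/public data (the default)
               OR user_id = $1      -- plus the caller's own private data
        """
        return await conn.fetch(sql, requesting_user_id)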
rem/services/embeddings/worker.py
@@ -23,6 +23,8 @@ Future:
 import asyncio
 import os
 from typing import Any, Optional
+import hashlib
+import uuid
 from uuid import uuid4
 
 import httpx
@@ -108,6 +110,7 @@ class EmbeddingWorker:
         self.task_queue: asyncio.Queue = asyncio.Queue()
         self.workers: list[asyncio.Task] = []
         self.running = False
+        self._in_flight_count = 0  # Track tasks being processed (not just in queue)
 
         # Store API key for direct HTTP requests
         from ...settings import settings
@@ -143,17 +146,18 @@ class EmbeddingWorker:
             return
 
         queue_size = self.task_queue.qsize()
-        logger.debug(f"Stopping EmbeddingWorker (processing {queue_size} queued tasks first)")
+        in_flight = self._in_flight_count
+        logger.debug(f"Stopping EmbeddingWorker (queue={queue_size}, in_flight={in_flight})")
 
-        # Wait for queue to drain (with timeout)
+        # Wait for both queue to drain AND in-flight tasks to complete
         max_wait = 30  # 30 seconds max
         waited = 0.0
-        while not self.task_queue.empty() and waited < max_wait:
+        while (not self.task_queue.empty() or self._in_flight_count > 0) and waited < max_wait:
             await asyncio.sleep(0.5)
             waited += 0.5
 
-        if not self.task_queue.empty():
-            remaining = self.task_queue.qsize()
+        if not self.task_queue.empty() or self._in_flight_count > 0:
+            remaining = self.task_queue.qsize() + self._in_flight_count
             logger.warning(
                 f"EmbeddingWorker timeout: {remaining} tasks remaining after {max_wait}s"
             )
@@ -205,12 +209,18 @@ class EmbeddingWorker:
                 if not batch:
                     continue
 
-                logger.debug(f"Worker {worker_id} processing batch of {len(batch)} tasks")
+                # Track in-flight tasks
+                self._in_flight_count += len(batch)
 
-                # Generate embeddings for batch
-                await self._process_batch(batch)
+                logger.debug(f"Worker {worker_id} processing batch of {len(batch)} tasks")
 
-                logger.debug(f"Worker {worker_id} completed batch")
+                try:
+                    # Generate embeddings for batch
+                    await self._process_batch(batch)
+                    logger.debug(f"Worker {worker_id} completed batch")
+                finally:
+                    # Always decrement in-flight count, even on error
+                    self._in_flight_count -= len(batch)
 
             except asyncio.CancelledError:
                 logger.debug(f"Worker {worker_id} cancelled")
@@ -373,7 +383,11 @@ class EmbeddingWorker:
         for task, embedding in zip(tasks, embeddings):
             table_name = f"embeddings_{task.table_name}"
 
-            # Build upsert SQL
+            # Generate deterministic ID from key fields (entity_id, field_name, provider)
+            key_string = f"{task.entity_id}:{task.field_name}:{task.provider}"
+            embedding_id = str(uuid.UUID(hashlib.md5(key_string.encode()).hexdigest()))
+
+            # Build upsert SQL - conflict on deterministic ID
             sql = f"""
                 INSERT INTO {table_name} (
                     id,
@@ -386,7 +400,7 @@ class EmbeddingWorker:
                     updated_at
                 )
                 VALUES ($1, $2, $3, $4, $5, $6, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)
-                ON CONFLICT (entity_id, field_name, provider)
+                ON CONFLICT (id)
                 DO UPDATE SET
                     model = EXCLUDED.model,
                     embedding = EXCLUDED.embedding,
@@ -400,7 +414,7 @@ class EmbeddingWorker:
                 await self.postgres_service.execute(
                     sql,
                     (
-                        str(uuid4()),
+                        embedding_id,
                         task.entity_id,
                         task.field_name,
                         task.provider,
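The three worker.py hunks above make the upsert idempotent: because the ID is derived from (entity_id, field_name, provider), re-embedding the same field always produces the same primary key, so ON CONFLICT (id) works without a separate unique index on those three columns. A minimal sketch of the derivation; note that uuid.uuid5 is the standard-library way to build name-based UUIDs (it hashes with SHA-1 rather than MD5, so it yields different values than the code in this diff):

    import hashlib
    import uuid

    def embedding_id(entity_id: str, field_name: str, provider: str) -> str:
        """Same key fields -> same UUID, so repeated upserts hit ON CONFLICT (id)."""
        key = f"{entity_id}:{field_name}:{provider}"
        return str(uuid.UUID(hashlib.md5(key.encode()).hexdigest()))

    # Equivalent intent via the standard name-based constructor (different values):
    def embedding_id_v5(entity_id: str, field_name: str, provider: str) -> str:
        key = f"{entity_id}:{field_name}:{provider}"
        return str(uuid.uuid5(uuid.NAMESPACE_URL, key))

    assert embedding_id("e1", "title", "openai") == embedding_id("e1", "title", "openai")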
rem/services/postgres/__init__.py
@@ -3,22 +3,47 @@ PostgreSQL service for CloudNativePG database operations.
 """
 
 from .diff_service import DiffService, SchemaDiff
+from .programmable_diff_service import (
+    DiffResult,
+    ObjectDiff,
+    ObjectType,
+    ProgrammableDiffService,
+)
 from .repository import Repository
 from .service import PostgresService
 
 
+_postgres_instance: PostgresService | None = None
+
+
 def get_postgres_service() -> PostgresService | None:
     """
-    Get PostgresService instance.
+    Get PostgresService singleton instance.
 
     Returns None if Postgres is disabled.
+    Uses singleton pattern to prevent connection pool exhaustion.
     """
+    global _postgres_instance
+
     from ...settings import settings
 
     if not settings.postgres.enabled:
         return None
 
-    return PostgresService()
+    if _postgres_instance is None:
+        _postgres_instance = PostgresService()
+
+    return _postgres_instance
 
 
-__all__ = ["PostgresService", "get_postgres_service", "Repository", "DiffService", "SchemaDiff"]
+__all__ = [
+    "DiffResult",
+    "DiffService",
+    "ObjectDiff",
+    "ObjectType",
+    "PostgresService",
+    "ProgrammableDiffService",
+    "Repository",
+    "SchemaDiff",
+    "get_postgres_service",
+]
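One caveat worth noting about the lazy singleton above: the check-then-assign in get_postgres_service() is not atomic, so two threads calling it for the first time concurrently could each construct a PostgresService. If first use can happen off the main thread, a lock-guarded variant closes the gap; this is a sketch of the standard fix, not something this diff includes:

    import threading

    _postgres_instance = None
    _instance_lock = threading.Lock()

    def get_postgres_service_threadsafe():
        """Double-checked locking: only one PostgresService is ever constructed."""
        global _postgres_instance
        if _postgres_instance is None:
            with _instance_lock:
                if _postgres_instance is None:  # re-check under the lock
                    _postgres_instance = PostgresService()
        return _postgres_instance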
rem/services/postgres/diff_service.py
@@ -5,12 +5,17 @@ Uses Alembic autogenerate to detect differences between:
 - Target schema (derived from Pydantic models)
 - Current database schema
 
+Also compares programmable objects (functions, triggers, views) which
+Alembic does not track.
+
 This enables:
 1. Local development: See what would change before applying migrations
 2. CI validation: Detect drift between code and database (--check mode)
 3. Migration generation: Create incremental migration files
 """
 
+import asyncio
+import re
 from dataclasses import dataclass, field
 from pathlib import Path
 from typing import Optional
@@ -51,11 +56,14 @@ class SchemaDiff:
     sql: str = ""
     upgrade_ops: Optional[ops.UpgradeOps] = None
     filtered_count: int = 0  # Number of operations filtered out by strategy
+    # Programmable objects (functions, triggers, views)
+    programmable_summary: list[str] = field(default_factory=list)
+    programmable_sql: str = ""
 
     @property
     def change_count(self) -> int:
         """Total number of detected changes."""
-        return len(self.summary)
+        return len(self.summary) + len(self.programmable_summary)
 
 
 class DiffService:
@@ -127,10 +135,13 @@ class DiffService:
         # These are now generated in pydantic_to_sqlalchemy
         return True
 
-    def compute_diff(self) -> SchemaDiff:
+    def compute_diff(self, include_programmable: bool = True) -> SchemaDiff:
         """
         Compare Pydantic models against database and return differences.
 
+        Args:
+            include_programmable: If True, also diff functions/triggers/views
+
         Returns:
             SchemaDiff with detected changes
         """
@@ -167,21 +178,62 @@ class DiffService:
         for op in filtered_ops:
             summary.extend(self._describe_operation(op))
 
-        has_changes = len(summary) > 0
-
         # Generate SQL if there are changes
         sql = ""
-        if has_changes and upgrade_ops:
+        if summary and upgrade_ops:
             sql = self._render_sql(upgrade_ops, engine)
 
+        # Programmable objects diff (functions, triggers, views)
+        programmable_summary = []
+        programmable_sql = ""
+        if include_programmable:
+            prog_summary, prog_sql = self._compute_programmable_diff()
+            programmable_summary = prog_summary
+            programmable_sql = prog_sql
+
+        has_changes = len(summary) > 0 or len(programmable_summary) > 0
+
         return SchemaDiff(
             has_changes=has_changes,
             summary=summary,
             sql=sql,
             upgrade_ops=upgrade_ops,
             filtered_count=filtered_count,
+            programmable_summary=programmable_summary,
+            programmable_sql=programmable_sql,
         )
 
+    def _compute_programmable_diff(self) -> tuple[list[str], str]:
+        """
+        Compute diff for programmable objects (functions, triggers, views).
+
+        Returns:
+            Tuple of (summary_lines, sync_sql)
+        """
+        from .programmable_diff_service import ProgrammableDiffService
+
+        service = ProgrammableDiffService()
+
+        # Run async diff in sync context
+        try:
+            loop = asyncio.get_event_loop()
+        except RuntimeError:
+            loop = asyncio.new_event_loop()
+            asyncio.set_event_loop(loop)
+
+        result = loop.run_until_complete(service.compute_diff())
+
+        summary = []
+        for diff in result.diffs:
+            if diff.status == "missing":
+                summary.append(f"+ {diff.object_type.value.upper()} {diff.name} (missing)")
+            elif diff.status == "different":
+                summary.append(f"~ {diff.object_type.value.upper()} {diff.name} (different)")
+            elif diff.status == "extra":
+                summary.append(f"- {diff.object_type.value.upper()} {diff.name} (extra in db)")
+
+        return summary, result.sync_sql
+
     def _filter_operations(self, operations: list) -> tuple[list, int]:
         """
         Filter operations based on migration strategy.
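A caveat on the async-to-sync bridge in _compute_programmable_diff(): asyncio.get_event_loop() is deprecated when no loop is running and newer Python versions stop creating one implicitly, while run_until_complete() raises if called while a loop is already running (for example, from inside the API process). A defensive variant of the same bridge, as a sketch only; asyncio.run() is the usual entry point when no loop exists:

    import asyncio

    def run_sync(coro):
        """Run a coroutine from sync code, failing loudly if a loop is already live."""
        try:
            asyncio.get_running_loop()
        except RuntimeError:
            return asyncio.run(coro)  # no running loop: safe to own one
        raise RuntimeError(
            "called from within a running event loop; await the coroutine instead"
        )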