remdb 0.3.172__py3-none-any.whl → 0.3.223__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of remdb has been flagged as potentially problematic.

Files changed (57)
  1. rem/agentic/README.md +262 -2
  2. rem/agentic/context.py +173 -0
  3. rem/agentic/context_builder.py +12 -2
  4. rem/agentic/mcp/tool_wrapper.py +39 -16
  5. rem/agentic/providers/pydantic_ai.py +46 -43
  6. rem/agentic/schema.py +2 -2
  7. rem/agentic/tools/rem_tools.py +11 -0
  8. rem/api/main.py +1 -1
  9. rem/api/mcp_router/resources.py +64 -8
  10. rem/api/mcp_router/server.py +31 -24
  11. rem/api/mcp_router/tools.py +621 -166
  12. rem/api/routers/admin.py +30 -4
  13. rem/api/routers/auth.py +114 -15
  14. rem/api/routers/chat/completions.py +66 -18
  15. rem/api/routers/chat/sse_events.py +7 -3
  16. rem/api/routers/chat/streaming.py +254 -22
  17. rem/api/routers/common.py +18 -0
  18. rem/api/routers/dev.py +7 -1
  19. rem/api/routers/feedback.py +9 -1
  20. rem/api/routers/messages.py +176 -38
  21. rem/api/routers/models.py +9 -1
  22. rem/api/routers/query.py +12 -1
  23. rem/api/routers/shared_sessions.py +16 -0
  24. rem/auth/jwt.py +19 -4
  25. rem/auth/middleware.py +42 -28
  26. rem/cli/README.md +62 -0
  27. rem/cli/commands/ask.py +1 -1
  28. rem/cli/commands/db.py +148 -70
  29. rem/cli/commands/process.py +171 -43
  30. rem/models/entities/ontology.py +91 -101
  31. rem/schemas/agents/rem.yaml +1 -1
  32. rem/services/content/service.py +18 -5
  33. rem/services/email/service.py +11 -2
  34. rem/services/embeddings/worker.py +26 -12
  35. rem/services/postgres/__init__.py +28 -3
  36. rem/services/postgres/diff_service.py +57 -5
  37. rem/services/postgres/programmable_diff_service.py +635 -0
  38. rem/services/postgres/pydantic_to_sqlalchemy.py +2 -2
  39. rem/services/postgres/register_type.py +12 -11
  40. rem/services/postgres/repository.py +46 -25
  41. rem/services/postgres/schema_generator.py +5 -5
  42. rem/services/postgres/sql_builder.py +6 -5
  43. rem/services/session/__init__.py +8 -1
  44. rem/services/session/compression.py +40 -2
  45. rem/services/session/pydantic_messages.py +276 -0
  46. rem/settings.py +28 -0
  47. rem/sql/background_indexes.sql +5 -0
  48. rem/sql/migrations/001_install.sql +157 -10
  49. rem/sql/migrations/002_install_models.sql +160 -132
  50. rem/sql/migrations/004_cache_system.sql +7 -275
  51. rem/sql/migrations/migrate_session_id_to_uuid.sql +45 -0
  52. rem/utils/model_helpers.py +101 -0
  53. rem/utils/schema_loader.py +6 -6
  54. {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/METADATA +1 -1
  55. {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/RECORD +57 -53
  56. {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/WHEEL +0 -0
  57. {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/entry_points.txt +0 -0
rem/models/entities/ontology.py
@@ -1,63 +1,55 @@
- """Ontology entity for tenant-specific knowledge extensions.
+ """Ontology entity for domain-specific knowledge.

- **What is Ontology Extraction?**
+ **What are Ontologies?**

- Ontologies are **domain-specific structured knowledge** extracted from files using custom
- agent schemas. They extend REM's normal file processing pipeline with tenant-specific
- parsers that extract structured data the standard chunking pipeline would miss.
+ Ontologies are **domain-specific structured knowledge** that can be:
+ 1. **Extracted** from files using custom agent schemas (agent-extracted)
+ 2. **Loaded directly** from external sources like git repos or S3 (direct-loaded)

- **Normal File Processing:**
- File → extract text → chunk → embed → resources (semantic search ready)
+ **Use Case 1: Agent-Extracted Ontologies**

- **Ontology Processing (Tenant Knowledge Extensions):**
  File → custom agent → structured JSON → ontology (domain knowledge)

- **Why Ontologies?**
- - Standard chunking gives you semantic search over raw content
- - Ontologies give you **structured queryable fields** from domain logic
- - Example: A contract PDF becomes both searchable chunks AND a structured record with
-   parties, dates, payment terms, obligations as queryable fields
+ Example: A contract PDF becomes a structured record with parties, dates, payment terms.
+
+ **Use Case 2: Direct-Loaded Ontologies (Knowledge Bases)**
+
+ External source (git/S3) → load → ontology (reference knowledge)
+
+ Example: A psychiatric ontology of disorders, symptoms, and drugs loaded from markdown
+ files in a git repository. Each markdown file becomes an ontology node with:
+ - `uri`: git path (e.g., `git://org/repo/ontology/disorders/anxiety/panic-disorder.md`)
+ - `content`: markdown content for embedding/search
+ - `extracted_data`: parsed frontmatter or structure

  **Architecture:**
- - Runs as part of dreaming worker (background knowledge extraction)
- - OntologyConfig defines which files trigger which extractors (MIME type, URI pattern, tags)
+ - Runs as part of dreaming worker (background knowledge extraction) OR
+ - Loaded directly via `rem db load` for external knowledge bases
+ - OntologyConfig defines which files trigger which extractors
  - Multiple ontologies per file (apply different domain lenses)
- - Tenant-scoped: Each tenant can define their own extractors
+ - Tenant-scoped: Each tenant can define their own extractors and knowledge bases

  **Use Cases:**

- 1. **Recruitment (CV Parsing)**
- - Standard pipeline: Chunks for "find me candidates with Python experience"
- - Ontology: Structured fields for filtering/sorting (years_experience, seniority_level, skills[])
-
- 2. **Legal (Contract Analysis)**
- - Standard pipeline: Semantic search over contract text
- - Ontology: Queryable fields (parties, effective_date, payment_amount, key_obligations[])
+ 1. **Recruitment (CV Parsing)** - Agent-extracted
+ - Ontology: Structured fields for filtering/sorting (years_experience, skills[])

- 3. **Medical (Health Records)**
- - Standard pipeline: Find mentions of conditions
- - Ontology: Structured diagnoses, medications, dosages, treatment plans
+ 2. **Legal (Contract Analysis)** - Agent-extracted
+ - Ontology: Queryable fields (parties, effective_date, payment_amount)

- 4. **Finance (Report Analysis)**
- - Standard pipeline: Search for financial terms
- - Ontology: Extracted metrics, risk_flags, trends, forecasts
+ 3. **Medical Knowledge Base** - Direct-loaded
+ - Ontology: Disorders, symptoms, medications from curated markdown files
+ - Enables semantic search over psychiatric/medical domain knowledge

- **Example Flow:**
- 1. Tenant creates OntologyConfig: "Run cv-parser-v1 on files with mime_type='application/pdf' and tags=['resume']"
- 2. File uploaded with tags=["resume"]
- 3. Normal processing: File → chunks → resources
- 4. Dreaming worker detects matching OntologyConfig
- 5. Loads cv-parser-v1 agent schema from database
- 6. Runs agent on file content → extracts structured data
- 7. Stores Ontology with extracted_data = {candidate_name, skills, experience, education, ...}
- 8. Ontology is now queryable via LOOKUP, SEARCH, or direct SQL
+ 4. **Documentation/Procedures** - Direct-loaded
+ - Ontology: Clinical procedures (e.g., SCID-5 assessment steps)
+ - Reference material accessible via RAG

  **Design:**
- - Each ontology links to a File via file_id
- - Agent schema tracked via agent_schema_id (human-readable label, not UUID)
- - Structured data in `extracted_data` (arbitrary JSON, schema defined by agent)
- - Embeddings generated for semantic search (configurable fields via agent schema)
- - Multiple ontologies per file using different schemas
+ - `file_id` and `agent_schema_id` are optional (only needed for agent-extracted)
+ - `uri` field for external source references (git://, s3://, https://)
+ - Structured data in `extracted_data` (arbitrary JSON)
+ - Embeddings generated for semantic search via `content` field
  - Tenant-isolated: OntologyConfigs are tenant-scoped
  """

@@ -70,18 +62,19 @@ from ..core.core_model import CoreModel


  class Ontology(CoreModel):
- """Domain-specific knowledge extracted from files using custom agents.
+ """Domain-specific knowledge - either agent-extracted or direct-loaded.

  Attributes:
  name: Human-readable label for this ontology instance
- file_id: Foreign key to File entity that was processed
- agent_schema_id: Foreign key to Schema entity that performed extraction
- provider_name: LLM provider used for extraction (e.g., "anthropic", "openai")
- model_name: Specific model used (e.g., "claude-sonnet-4-5")
- extracted_data: Structured data extracted by agent (arbitrary JSON)
+ uri: External source reference (git://, s3://, https://) for direct-loaded ontologies
+ file_id: Foreign key to File entity (optional - only for agent-extracted)
+ agent_schema_id: Schema that performed extraction (optional - only for agent-extracted)
+ provider_name: LLM provider used for extraction (optional)
+ model_name: Specific model used (optional)
+ extracted_data: Structured data - either extracted by agent or parsed from source
  confidence_score: Optional confidence score from extraction (0.0-1.0)
  extraction_timestamp: When extraction was performed
- embedding_text: Text used for generating embedding (derived from extracted_data)
+ content: Text used for generating embedding

  Inherited from CoreModel:
  id: UUID or string identifier
@@ -93,10 +86,9 @@ class Ontology(CoreModel):
  graph_edges: Relationships to other entities
  metadata: Flexible metadata storage
  tags: Classification tags
- column: Database schema metadata

  Example Usage:
- # CV extraction
+ # Agent-extracted: CV parsing
  cv_ontology = Ontology(
  name="john-doe-cv-2024",
  file_id="file-uuid-123",
@@ -105,73 +97,72 @@ class Ontology(CoreModel):
  model_name="claude-sonnet-4-5-20250929",
  extracted_data={
  "candidate_name": "John Doe",
- "email": "john@example.com",
  "skills": ["Python", "PostgreSQL", "Kubernetes"],
- "experience": [
- {
- "company": "TechCorp",
- "role": "Senior Engineer",
- "years": 3,
- "achievements": ["Led migration to k8s", "Reduced costs 40%"]
- }
- ],
- "education": [
- {"degree": "BS Computer Science", "institution": "MIT", "year": 2018}
- ]
  },
  confidence_score=0.95,
- tags=["cv", "engineering", "senior-level"]
+ tags=["cv", "engineering"]
  )

- # Contract extraction
- contract_ontology = Ontology(
- name="acme-supplier-agreement-2024",
- file_id="file-uuid-456",
- agent_schema_id="contract-parser-v2",
- provider_name="openai",
- model_name="gpt-4.1",
+ # Direct-loaded: Knowledge base from git
+ api_docs = Ontology(
+ name="rest-api-guide",
+ uri="git://example-org/docs/api/rest-api-guide.md",
+ content="# REST API Guide\\n\\nThis guide covers RESTful API design...",
  extracted_data={
- "contract_type": "supplier_agreement",
- "parties": [
- {"name": "ACME Corp", "role": "buyer"},
- {"name": "SupplyChain Inc", "role": "supplier"}
- ],
- "effective_date": "2024-01-01",
- "termination_date": "2026-12-31",
- "payment_terms": {
- "amount": 500000,
- "currency": "USD",
- "frequency": "quarterly"
- },
- "key_obligations": [
- "Supplier must deliver within 30 days",
- "Buyer must pay within 60 days of invoice"
- ]
+ "type": "documentation",
+ "category": "api",
+ "version": "2.0",
+ },
+ tags=["api", "rest", "documentation"]
+ )
+
+ # Direct-loaded: Technical spec from git
+ config_spec = Ontology(
+ name="config-schema",
+ uri="git://example-org/docs/specs/config-schema.md",
+ content="# Configuration Schema\\n\\nThis document defines...",
+ extracted_data={
+ "type": "specification",
+ "format": "yaml",
+ "version": "1.0",
  },
- confidence_score=0.92,
- tags=["contract", "supplier", "procurement"]
+ tags=["config", "schema", "specification"]
  )
  """

  # Core fields
  name: str
- file_id: UUID | str
- agent_schema_id: str # Natural language label of Schema entity
+ uri: Optional[str] = None # External source: git://, s3://, https://

- # Extraction metadata
- provider_name: str # LLM provider (anthropic, openai, etc.)
- model_name: str # Specific model used
- extracted_data: dict[str, Any] # Arbitrary structured data from agent
+ # Agent extraction fields (optional - only for agent-extracted ontologies)
+ file_id: Optional[UUID | str] = None # FK to File entity
+ agent_schema_id: Optional[str] = None # Schema that performed extraction
+ provider_name: Optional[str] = None # LLM provider (anthropic, openai, etc.)
+ model_name: Optional[str] = None # Specific model used
+
+ # Data fields
+ extracted_data: Optional[dict[str, Any]] = None # Structured data
  confidence_score: Optional[float] = None # 0.0-1.0 if provided by agent
  extraction_timestamp: Optional[str] = None # ISO8601 timestamp

- # Semantic search support
- embedding_text: Optional[str] = None # Text for embedding generation
+ # Semantic search support - 'content' is a default embeddable field name
+ content: Optional[str] = None # Text for embedding generation

  model_config = ConfigDict(
  json_schema_extra={
- "description": "Domain-specific knowledge extracted from files using custom agents",
+ "description": "Domain-specific knowledge - agent-extracted or direct-loaded from external sources",
  "examples": [
+ {
+ "name": "panic-disorder",
+ "uri": "git://org/repo/ontology/disorders/anxiety/panic-disorder.md",
+ "content": "# Panic Disorder\n\nPanic disorder is characterized by...",
+ "extracted_data": {
+ "type": "disorder",
+ "category": "anxiety",
+ "icd10": "F41.0"
+ },
+ "tags": ["disorder", "anxiety"]
+ },
  {
  "name": "john-doe-cv-2024",
  "file_id": "550e8400-e29b-41d4-a716-446655440000",
@@ -180,8 +171,7 @@ class Ontology(CoreModel):
  "model_name": "claude-sonnet-4-5-20250929",
  "extracted_data": {
  "candidate_name": "John Doe",
- "skills": ["Python", "PostgreSQL"],
- "experience": []
+ "skills": ["Python", "PostgreSQL"]
  },
  "confidence_score": 0.95,
  "tags": ["cv", "engineering"]

rem/schemas/agents/rem.yaml
@@ -124,7 +124,7 @@ json_schema_extra:

  # Explicit resource declarations for reference data
  resources:
- - uri: rem://schemas
+ - uri: rem://agents
  name: Agent Schemas List
  description: List all available agent schemas in the system
  - uri: rem://status

rem/services/content/service.py
@@ -274,7 +274,7 @@ class ContentService:
  async def ingest_file(
  self,
  file_uri: str,
- user_id: str,
+ user_id: str | None = None,
  category: str | None = None,
  tags: list[str] | None = None,
  is_local_server: bool = False,
@@ -283,6 +283,10 @@ class ContentService:
  """
  Complete file ingestion pipeline: read → store → parse → chunk → embed.

+ **IMPORTANT: Data is PUBLIC by default (user_id=None).**
+ This is correct for shared knowledge bases (ontologies, procedures, reference data).
+ Private user-scoped data is rarely needed - only set user_id for truly personal content.
+
  **CENTRALIZED INGESTION**: This is the single entry point for all file ingestion
  in REM. It handles:

@@ -319,7 +323,9 @@ class ContentService:

  Args:
  file_uri: Source file location (local path, s3://, or https://)
- user_id: User identifier for data isolation and ownership
+ user_id: User identifier for PRIVATE data only. Default None = PUBLIC/shared.
+ Leave as None for shared knowledge bases, ontologies, reference data.
+ Only set for truly private user-specific content.
  category: Optional category tag (document, code, audio, etc.)
  tags: Optional list of tags
  is_local_server: True if running as local/stdio MCP server
@@ -347,12 +353,19 @@ class ContentService:

  Example:
  >>> service = ContentService()
+ >>> # PUBLIC data (default) - visible to all users
  >>> result = await service.ingest_file(
- ... file_uri="s3://bucket/contract.pdf",
- ... user_id="user-123",
- ... category="legal"
+ ... file_uri="s3://bucket/procedure.pdf",
+ ... category="medical"
  ... )
  >>> print(f"Created {result['resources_created']} searchable chunks")
+ >>>
+ >>> # PRIVATE data (rare) - only for user-specific content
+ >>> result = await service.ingest_file(
+ ... file_uri="s3://bucket/personal-notes.pdf",
+ ... user_id="user-123", # Only this user can access
+ ... category="personal"
+ ... )
  """
  from pathlib import Path
  from uuid import uuid4

rem/services/email/service.py
@@ -376,8 +376,17 @@ class EmailService:
  await user_repo.upsert(existing_user)
  return {"allowed": True, "error": None}
  else:
- # New user - check if domain is trusted
- if settings and hasattr(settings, 'email') and settings.email.trusted_domain_list:
+ # New user - first check if they're a subscriber (by email lookup)
+ from ...models.entities import Subscriber
+ subscriber_repo = Repository(Subscriber, db=db)
+ existing_subscriber = await subscriber_repo.find_one({"email": email})
+
+ if existing_subscriber:
+ # Subscriber exists - allow them to create account
+ # (approved field may not exist in older schemas, so just check existence)
+ logger.info(f"Subscriber {email} creating user account")
+ elif settings and hasattr(settings, 'email') and settings.email.trusted_domain_list:
+ # Not an approved subscriber - check if domain is trusted
  if not settings.email.is_domain_trusted(email):
  email_domain = email.split("@")[-1]
  logger.warning(f"Untrusted domain attempted signup: {email_domain}")

rem/services/embeddings/worker.py
@@ -23,6 +23,8 @@ Future:
  import asyncio
  import os
  from typing import Any, Optional
+ import hashlib
+ import uuid
  from uuid import uuid4

  import httpx
@@ -108,6 +110,7 @@ class EmbeddingWorker:
  self.task_queue: asyncio.Queue = asyncio.Queue()
  self.workers: list[asyncio.Task] = []
  self.running = False
+ self._in_flight_count = 0 # Track tasks being processed (not just in queue)

  # Store API key for direct HTTP requests
  from ...settings import settings
@@ -143,17 +146,18 @@ class EmbeddingWorker:
  return

  queue_size = self.task_queue.qsize()
- logger.debug(f"Stopping EmbeddingWorker (processing {queue_size} queued tasks first)")
+ in_flight = self._in_flight_count
+ logger.debug(f"Stopping EmbeddingWorker (queue={queue_size}, in_flight={in_flight})")

- # Wait for queue to drain (with timeout)
+ # Wait for both queue to drain AND in-flight tasks to complete
  max_wait = 30 # 30 seconds max
  waited = 0.0
- while not self.task_queue.empty() and waited < max_wait:
+ while (not self.task_queue.empty() or self._in_flight_count > 0) and waited < max_wait:
  await asyncio.sleep(0.5)
  waited += 0.5

- if not self.task_queue.empty():
- remaining = self.task_queue.qsize()
+ if not self.task_queue.empty() or self._in_flight_count > 0:
+ remaining = self.task_queue.qsize() + self._in_flight_count
  logger.warning(
  f"EmbeddingWorker timeout: {remaining} tasks remaining after {max_wait}s"
  )
@@ -205,12 +209,18 @@ class EmbeddingWorker:
  if not batch:
  continue

- logger.debug(f"Worker {worker_id} processing batch of {len(batch)} tasks")
+ # Track in-flight tasks
+ self._in_flight_count += len(batch)

- # Generate embeddings for batch
- await self._process_batch(batch)
+ logger.debug(f"Worker {worker_id} processing batch of {len(batch)} tasks")

- logger.debug(f"Worker {worker_id} completed batch")
+ try:
+ # Generate embeddings for batch
+ await self._process_batch(batch)
+ logger.debug(f"Worker {worker_id} completed batch")
+ finally:
+ # Always decrement in-flight count, even on error
+ self._in_flight_count -= len(batch)

  except asyncio.CancelledError:
  logger.debug(f"Worker {worker_id} cancelled")
@@ -373,7 +383,11 @@ class EmbeddingWorker:
  for task, embedding in zip(tasks, embeddings):
  table_name = f"embeddings_{task.table_name}"

- # Build upsert SQL
+ # Generate deterministic ID from key fields (entity_id, field_name, provider)
+ key_string = f"{task.entity_id}:{task.field_name}:{task.provider}"
+ embedding_id = str(uuid.UUID(hashlib.md5(key_string.encode()).hexdigest()))
+
+ # Build upsert SQL - conflict on deterministic ID
  sql = f"""
  INSERT INTO {table_name} (
  id,
@@ -386,7 +400,7 @@
  updated_at
  )
  VALUES ($1, $2, $3, $4, $5, $6, CURRENT_TIMESTAMP, CURRENT_TIMESTAMP)
- ON CONFLICT (entity_id, field_name, provider)
+ ON CONFLICT (id)
  DO UPDATE SET
  model = EXCLUDED.model,
  embedding = EXCLUDED.embedding,
@@ -400,7 +414,7 @@
  await self.postgres_service.execute(
  sql,
  (
- str(uuid4()),
+ embedding_id,
  task.entity_id,
  task.field_name,
  task.provider,
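
The worker change above replaces random embedding row IDs with a UUID derived deterministically from the task's key fields, so `ON CONFLICT (id)` upserts work without a composite unique constraint. A standalone sketch of that ID scheme (illustration only, not the worker code itself):

```python
import hashlib
import uuid


def embedding_id(entity_id: str, field_name: str, provider: str) -> str:
    # The same (entity_id, field_name, provider) triple always yields the same UUID,
    # so re-embedding a field targets the existing row instead of inserting a new one.
    key_string = f"{entity_id}:{field_name}:{provider}"
    return str(uuid.UUID(hashlib.md5(key_string.encode()).hexdigest()))


assert embedding_id("res-123", "content", "openai") == embedding_id("res-123", "content", "openai")
```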

rem/services/postgres/__init__.py
@@ -3,22 +3,47 @@ PostgreSQL service for CloudNativePG database operations.
  """

  from .diff_service import DiffService, SchemaDiff
+ from .programmable_diff_service import (
+ DiffResult,
+ ObjectDiff,
+ ObjectType,
+ ProgrammableDiffService,
+ )
  from .repository import Repository
  from .service import PostgresService


+ _postgres_instance: PostgresService | None = None
+
+
  def get_postgres_service() -> PostgresService | None:
  """
- Get PostgresService instance.
+ Get PostgresService singleton instance.

  Returns None if Postgres is disabled.
+ Uses singleton pattern to prevent connection pool exhaustion.
  """
+ global _postgres_instance
+
  from ...settings import settings

  if not settings.postgres.enabled:
  return None

- return PostgresService()
+ if _postgres_instance is None:
+ _postgres_instance = PostgresService()
+
+ return _postgres_instance


- __all__ = ["PostgresService", "get_postgres_service", "Repository", "DiffService", "SchemaDiff"]
+ __all__ = [
+ "DiffResult",
+ "DiffService",
+ "ObjectDiff",
+ "ObjectType",
+ "PostgresService",
+ "ProgrammableDiffService",
+ "Repository",
+ "SchemaDiff",
+ "get_postgres_service",
+ ]
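
A short usage sketch of the new singleton accessor: repeated calls now share one `PostgresService`, and therefore one connection pool. The accessor is exported by this module; the enabled/disabled behaviour follows the code above.

```python
from rem.services.postgres import get_postgres_service

svc_a = get_postgres_service()
svc_b = get_postgres_service()

# With settings.postgres.enabled=True both names point at the same instance;
# when Postgres is disabled, both are None.
assert svc_a is svc_b
```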

rem/services/postgres/diff_service.py
@@ -5,12 +5,17 @@ Uses Alembic autogenerate to detect differences between:
  - Target schema (derived from Pydantic models)
  - Current database schema

+ Also compares programmable objects (functions, triggers, views) which
+ Alembic does not track.
+
  This enables:
  1. Local development: See what would change before applying migrations
  2. CI validation: Detect drift between code and database (--check mode)
  3. Migration generation: Create incremental migration files
  """

+ import asyncio
+ import re
  from dataclasses import dataclass, field
  from pathlib import Path
  from typing import Optional
@@ -51,11 +56,14 @@ class SchemaDiff:
  sql: str = ""
  upgrade_ops: Optional[ops.UpgradeOps] = None
  filtered_count: int = 0 # Number of operations filtered out by strategy
+ # Programmable objects (functions, triggers, views)
+ programmable_summary: list[str] = field(default_factory=list)
+ programmable_sql: str = ""

  @property
  def change_count(self) -> int:
  """Total number of detected changes."""
- return len(self.summary)
+ return len(self.summary) + len(self.programmable_summary)


  class DiffService:
@@ -127,10 +135,13 @@ class DiffService:
  # These are now generated in pydantic_to_sqlalchemy
  return True

- def compute_diff(self) -> SchemaDiff:
+ def compute_diff(self, include_programmable: bool = True) -> SchemaDiff:
  """
  Compare Pydantic models against database and return differences.

+ Args:
+ include_programmable: If True, also diff functions/triggers/views
+
  Returns:
  SchemaDiff with detected changes
  """
@@ -167,21 +178,62 @@ class DiffService:
  for op in filtered_ops:
  summary.extend(self._describe_operation(op))

- has_changes = len(summary) > 0
-
  # Generate SQL if there are changes
  sql = ""
- if has_changes and upgrade_ops:
+ if summary and upgrade_ops:
  sql = self._render_sql(upgrade_ops, engine)

+ # Programmable objects diff (functions, triggers, views)
+ programmable_summary = []
+ programmable_sql = ""
+ if include_programmable:
+ prog_summary, prog_sql = self._compute_programmable_diff()
+ programmable_summary = prog_summary
+ programmable_sql = prog_sql
+
+ has_changes = len(summary) > 0 or len(programmable_summary) > 0
+
  return SchemaDiff(
  has_changes=has_changes,
  summary=summary,
  sql=sql,
  upgrade_ops=upgrade_ops,
  filtered_count=filtered_count,
+ programmable_summary=programmable_summary,
+ programmable_sql=programmable_sql,
  )

+ def _compute_programmable_diff(self) -> tuple[list[str], str]:
+ """
+ Compute diff for programmable objects (functions, triggers, views).
+
+ Returns:
+ Tuple of (summary_lines, sync_sql)
+ """
+ from .programmable_diff_service import ProgrammableDiffService
+
+ service = ProgrammableDiffService()
+
+ # Run async diff in sync context
+ try:
+ loop = asyncio.get_event_loop()
+ except RuntimeError:
+ loop = asyncio.new_event_loop()
+ asyncio.set_event_loop(loop)
+
+ result = loop.run_until_complete(service.compute_diff())
+
+ summary = []
+ for diff in result.diffs:
+ if diff.status == "missing":
+ summary.append(f"+ {diff.object_type.value.upper()} {diff.name} (missing)")
+ elif diff.status == "different":
+ summary.append(f"~ {diff.object_type.value.upper()} {diff.name} (different)")
+ elif diff.status == "extra":
+ summary.append(f"- {diff.object_type.value.upper()} {diff.name} (extra in db)")
+
+ return summary, result.sync_sql
+
  def _filter_operations(self, operations: list) -> tuple[list, int]:
  """
  Filter operations based on migration strategy.
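
A hedged sketch of consuming the extended diff result. `DiffService` constructor arguments are not shown in this diff, so they are omitted below; the `SchemaDiff` fields used (`summary`, `programmable_summary`, `sql`, `programmable_sql`, `change_count`) come from the hunks above.

```python
from rem.services.postgres import DiffService

service = DiffService()  # constructor arguments, if any, omitted (not shown in this diff)
diff = service.compute_diff(include_programmable=True)

if diff.has_changes:
    print(f"{diff.change_count} change(s) detected")
    for line in diff.summary + diff.programmable_summary:
        print(" ", line)
    # diff.sql holds the Alembic-rendered DDL; diff.programmable_sql holds the
    # sync SQL for functions, triggers, and views.
```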