remdb 0.3.7__py3-none-any.whl → 0.3.133__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (107)
  1. rem/__init__.py +129 -2
  2. rem/agentic/README.md +76 -0
  3. rem/agentic/__init__.py +15 -0
  4. rem/agentic/agents/__init__.py +16 -2
  5. rem/agentic/agents/sse_simulator.py +502 -0
  6. rem/agentic/context.py +51 -25
  7. rem/agentic/llm_provider_models.py +301 -0
  8. rem/agentic/mcp/tool_wrapper.py +112 -17
  9. rem/agentic/otel/setup.py +93 -4
  10. rem/agentic/providers/phoenix.py +314 -132
  11. rem/agentic/providers/pydantic_ai.py +215 -26
  12. rem/agentic/schema.py +361 -21
  13. rem/agentic/tools/rem_tools.py +3 -3
  14. rem/api/README.md +238 -1
  15. rem/api/deps.py +255 -0
  16. rem/api/main.py +154 -37
  17. rem/api/mcp_router/resources.py +1 -1
  18. rem/api/mcp_router/server.py +26 -5
  19. rem/api/mcp_router/tools.py +465 -7
  20. rem/api/middleware/tracking.py +172 -0
  21. rem/api/routers/admin.py +494 -0
  22. rem/api/routers/auth.py +124 -0
  23. rem/api/routers/chat/completions.py +402 -20
  24. rem/api/routers/chat/models.py +88 -10
  25. rem/api/routers/chat/otel_utils.py +33 -0
  26. rem/api/routers/chat/sse_events.py +542 -0
  27. rem/api/routers/chat/streaming.py +642 -45
  28. rem/api/routers/dev.py +81 -0
  29. rem/api/routers/feedback.py +268 -0
  30. rem/api/routers/messages.py +473 -0
  31. rem/api/routers/models.py +78 -0
  32. rem/api/routers/query.py +360 -0
  33. rem/api/routers/shared_sessions.py +406 -0
  34. rem/auth/middleware.py +126 -27
  35. rem/cli/commands/README.md +237 -64
  36. rem/cli/commands/ask.py +13 -10
  37. rem/cli/commands/cluster.py +1808 -0
  38. rem/cli/commands/configure.py +5 -6
  39. rem/cli/commands/db.py +396 -139
  40. rem/cli/commands/experiments.py +469 -74
  41. rem/cli/commands/process.py +22 -15
  42. rem/cli/commands/scaffold.py +47 -0
  43. rem/cli/commands/schema.py +97 -50
  44. rem/cli/main.py +29 -6
  45. rem/config.py +10 -3
  46. rem/models/core/core_model.py +7 -1
  47. rem/models/core/experiment.py +54 -0
  48. rem/models/core/rem_query.py +5 -2
  49. rem/models/entities/__init__.py +21 -0
  50. rem/models/entities/domain_resource.py +38 -0
  51. rem/models/entities/feedback.py +123 -0
  52. rem/models/entities/message.py +30 -1
  53. rem/models/entities/session.py +83 -0
  54. rem/models/entities/shared_session.py +180 -0
  55. rem/models/entities/user.py +10 -3
  56. rem/registry.py +373 -0
  57. rem/schemas/agents/rem.yaml +7 -3
  58. rem/services/content/providers.py +92 -133
  59. rem/services/content/service.py +92 -20
  60. rem/services/dreaming/affinity_service.py +2 -16
  61. rem/services/dreaming/moment_service.py +2 -15
  62. rem/services/embeddings/api.py +24 -17
  63. rem/services/embeddings/worker.py +16 -16
  64. rem/services/phoenix/EXPERIMENT_DESIGN.md +3 -3
  65. rem/services/phoenix/client.py +302 -28
  66. rem/services/postgres/README.md +159 -15
  67. rem/services/postgres/__init__.py +2 -1
  68. rem/services/postgres/diff_service.py +531 -0
  69. rem/services/postgres/pydantic_to_sqlalchemy.py +427 -129
  70. rem/services/postgres/repository.py +132 -0
  71. rem/services/postgres/schema_generator.py +291 -9
  72. rem/services/postgres/service.py +6 -6
  73. rem/services/rate_limit.py +113 -0
  74. rem/services/rem/README.md +14 -0
  75. rem/services/rem/parser.py +44 -9
  76. rem/services/rem/service.py +36 -2
  77. rem/services/session/compression.py +24 -1
  78. rem/services/session/reload.py +1 -1
  79. rem/services/user_service.py +98 -0
  80. rem/settings.py +399 -29
  81. rem/sql/background_indexes.sql +21 -16
  82. rem/sql/migrations/001_install.sql +387 -54
  83. rem/sql/migrations/002_install_models.sql +2320 -393
  84. rem/sql/migrations/003_optional_extensions.sql +326 -0
  85. rem/sql/migrations/004_cache_system.sql +548 -0
  86. rem/utils/__init__.py +18 -0
  87. rem/utils/constants.py +97 -0
  88. rem/utils/date_utils.py +228 -0
  89. rem/utils/embeddings.py +17 -4
  90. rem/utils/files.py +167 -0
  91. rem/utils/mime_types.py +158 -0
  92. rem/utils/model_helpers.py +156 -1
  93. rem/utils/schema_loader.py +282 -35
  94. rem/utils/sql_paths.py +146 -0
  95. rem/utils/sql_types.py +3 -1
  96. rem/utils/vision.py +9 -14
  97. rem/workers/README.md +14 -14
  98. rem/workers/__init__.py +3 -1
  99. rem/workers/db_listener.py +579 -0
  100. rem/workers/db_maintainer.py +74 -0
  101. rem/workers/unlogged_maintainer.py +463 -0
  102. {remdb-0.3.7.dist-info → remdb-0.3.133.dist-info}/METADATA +460 -303
  103. {remdb-0.3.7.dist-info → remdb-0.3.133.dist-info}/RECORD +105 -74
  104. {remdb-0.3.7.dist-info → remdb-0.3.133.dist-info}/WHEEL +1 -1
  105. rem/sql/002_install_models.sql +0 -1068
  106. rem/sql/install_models.sql +0 -1038
  107. {remdb-0.3.7.dist-info → remdb-0.3.133.dist-info}/entry_points.txt +0 -0
rem/services/content/providers.py
@@ -2,17 +2,27 @@
 
 import json
 import multiprocessing
-import os
 import random
 import subprocess
 import sys
-import tempfile
 from abc import ABC, abstractmethod
 from pathlib import Path
 from typing import Any, Optional
 
 from loguru import logger
 
+from rem.utils.constants import (
+    AUDIO_CHUNK_TARGET_SECONDS,
+    AUDIO_CHUNK_WINDOW_SECONDS,
+    MIN_SILENCE_MS,
+    SILENCE_THRESHOLD_DB,
+    SUBPROCESS_TIMEOUT_SECONDS,
+    WAV_HEADER_MIN_BYTES,
+    WHISPER_COST_PER_MINUTE,
+)
+from rem.utils.files import temp_file_from_bytes
+from rem.utils.mime_types import get_extension
+
 
 class ContentProvider(ABC):
     """Base class for content extraction providers."""
@@ -132,7 +142,7 @@ import sys
 from pathlib import Path
 from kreuzberg import ExtractionConfig, extract_file_sync
 
-# Parse document with table extraction (requires PyTorch - Python <3.13 required)
+# Parse document with kreuzberg 3.x
 config = ExtractionConfig(
     extract_tables=True,
     chunk_content=False,
@@ -155,7 +165,7 @@ print(json.dumps(output))
             [sys.executable, "-c", script, str(file_path)],
             capture_output=True,
             text=True,
-            timeout=300,  # 5 minute timeout
+            timeout=SUBPROCESS_TIMEOUT_SECONDS,
         )
 
         if result.returncode != 0:
@@ -177,21 +187,9 @@ print(json.dumps(output))
         # Write bytes to temp file for kreuzberg
         # Detect extension from metadata
         content_type = metadata.get("content_type", "")
-        extension_map = {
-            "application/pdf": ".pdf",
-            "application/vnd.openxmlformats-officedocument.wordprocessingml.document": ".docx",
-            "application/vnd.openxmlformats-officedocument.presentationml.presentation": ".pptx",
-            "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet": ".xlsx",
-            "image/png": ".png",
-            "image/jpeg": ".jpg",
-        }
-        suffix = extension_map.get(content_type, ".pdf")  # Default to PDF
+        suffix = get_extension(content_type, default=".pdf")
 
-        with tempfile.NamedTemporaryFile(suffix=suffix, delete=False) as tmp:
-            tmp.write(content)
-            tmp_path = Path(tmp.name)
-
-        try:
+        with temp_file_from_bytes(content, suffix=suffix) as tmp_path:
             # Check if running in daemon process
             if self._is_daemon_process():
                 logger.info("Daemon process detected - using subprocess workaround for document parsing")
@@ -205,7 +203,7 @@ print(json.dumps(output))
                     }
                 except Exception as e:
                     logger.error(f"Subprocess parsing failed: {e}. Falling back to text-only.")
-                    # Fallback to simple text extraction
+                    # Fallback to simple text extraction (kreuzberg 3.x API)
                     from kreuzberg import ExtractionConfig, extract_file_sync
                     config = ExtractionConfig(extract_tables=False)
                     result = extract_file_sync(tmp_path, config=config)
@@ -215,13 +213,12 @@ print(json.dumps(output))
                         "file_extension": tmp_path.suffix,
                     }
             else:
-                # Normal execution (not in daemon)
+                # Normal execution (not in daemon) - kreuzberg 4.x with native ONNX/Rust
                 from kreuzberg import ExtractionConfig, extract_file_sync
-                # Table extraction with gmft (requires PyTorch - Python <3.13 required)
                 config = ExtractionConfig(
-                    extract_tables=True,
-                    chunk_content=False,
-                    extract_keywords=False,
+                    enable_quality_processing=True,  # Enables table extraction with native ONNX
+                    chunk_content=False,  # We handle chunking ourselves
+                    extract_tables=False,  # Disable table extraction to avoid PyTorch dependency
                 )
                 result = extract_file_sync(tmp_path, config=config)
                 text = result.content
@@ -236,10 +233,6 @@ print(json.dumps(output))
                 "metadata": extraction_metadata,
             }
 
-        finally:
-            # Clean up temp file
-            tmp_path.unlink(missing_ok=True)
-
 
 
 class AudioProvider(ContentProvider):
@@ -284,19 +277,20 @@ class AudioProvider(ContentProvider):
             ValueError: If OpenAI API key missing
         """
         # Handle empty or invalid content
-        if not content or len(content) < 44:  # WAV header is minimum 44 bytes
+        if not content or len(content) < WAV_HEADER_MIN_BYTES:
             logger.warning("Audio content too small to be valid WAV file")
             return {
                 "text": "[Invalid or empty audio file]",
                 "metadata": {"error": "invalid_content", "size": len(content)},
             }
 
-        # Check for OpenAI API key
-        api_key = os.getenv("OPENAI_API_KEY")
+        # Check for OpenAI API key (use settings)
+        from rem.settings import settings
+        api_key = settings.llm.openai_api_key
         if not api_key:
-            logger.warning("No OPENAI_API_KEY found - audio transcription disabled")
+            logger.warning("No OpenAI API key found - audio transcription disabled")
             return {
-                "text": "[Audio transcription requires OPENAI_API_KEY environment variable]",
+                "text": "[Audio transcription requires LLM__OPENAI_API_KEY to be set]",
                 "metadata": {"error": "missing_api_key"},
             }
 
@@ -313,83 +307,74 @@ class AudioProvider(ContentProvider):
         # Write bytes to temp file
         # Detect extension from metadata or use .wav as fallback
         content_type = metadata.get("content_type", "audio/wav")
-        extension_map = {
-            "audio/wav": ".wav",
-            "audio/mpeg": ".mp3",
-            "audio/mp4": ".m4a",
-            "audio/x-m4a": ".m4a",
-            "audio/flac": ".flac",
-            "audio/ogg": ".ogg",
-        }
-        extension = extension_map.get(content_type, ".wav")
+        extension = get_extension(content_type, default=".wav")
 
-        with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as tmp:
-            tmp.write(content)
-            tmp_path = Path(tmp.name)
+        chunker = None
+        chunks = None
 
-        try:
-            logger.info(f"Processing audio file: {tmp_path.name} ({len(content) / 1024 / 1024:.1f} MB)")
-
-            # Step 1: Chunk audio by silence
-            chunker = AudioChunker(
-                target_chunk_seconds=60.0,
-                chunk_window_seconds=2.0,
-                silence_threshold_db=-40.0,
-                min_silence_ms=500,
-            )
-
-            chunks = chunker.chunk_audio(tmp_path)
-            logger.info(f"Created {len(chunks)} audio chunks")
-
-            # Step 2: Transcribe chunks
-            transcriber = AudioTranscriber(api_key=api_key)
-            results = transcriber.transcribe_chunks(chunks)
-            logger.info(f"Transcribed {len(results)} chunks")
-
-            # Step 3: Combine into markdown format
-            # Format: Each chunk becomes a section with timestamp
-            markdown_parts = []
-            for result in results:
-                timestamp = f"{result.start_seconds:.1f}s - {result.end_seconds:.1f}s"
-                markdown_parts.append(f"## [{timestamp}]\n\n{result.text}\n")
-
-            markdown_text = "\n".join(markdown_parts)
-
-            # Calculate metadata
-            total_duration = sum(r.duration_seconds for r in results)
-            estimated_cost = (total_duration / 60) * 0.006  # $0.006 per minute
-            successful_chunks = sum(1 for r in results if r.confidence > 0)
-
-            extraction_metadata = {
-                "chunk_count": len(chunks),
-                "transcribed_chunks": successful_chunks,
-                "duration_seconds": total_duration,
-                "estimated_cost": estimated_cost,
-                "parser": "whisper_api",
-            }
+        with temp_file_from_bytes(content, suffix=extension) as tmp_path:
+            try:
+                logger.info(f"Processing audio file: {tmp_path.name} ({len(content) / 1024 / 1024:.1f} MB)")
+
+                # Step 1: Chunk audio by silence
+                chunker = AudioChunker(
+                    target_chunk_seconds=AUDIO_CHUNK_TARGET_SECONDS,
+                    chunk_window_seconds=AUDIO_CHUNK_WINDOW_SECONDS,
+                    silence_threshold_db=SILENCE_THRESHOLD_DB,
+                    min_silence_ms=MIN_SILENCE_MS,
+                )
 
-            logger.info(
-                f"Transcription complete: {successful_chunks}/{len(chunks)} chunks, "
-                f"${estimated_cost:.3f} cost"
-            )
+                chunks = chunker.chunk_audio(tmp_path)
+                logger.info(f"Created {len(chunks)} audio chunks")
 
-            return {
-                "text": markdown_text,
-                "metadata": extraction_metadata,
-            }
+                # Step 2: Transcribe chunks
+                transcriber = AudioTranscriber(api_key=api_key)
+                results = transcriber.transcribe_chunks(chunks)
+                logger.info(f"Transcribed {len(results)} chunks")
 
-        except Exception as e:
-            logger.error(f"Audio extraction failed: {e}")
-            raise RuntimeError(f"Audio transcription failed: {e}") from e
+                # Step 3: Combine into markdown format
+                # Format: Each chunk becomes a section with timestamp
+                markdown_parts = []
+                for result in results:
+                    timestamp = f"{result.start_seconds:.1f}s - {result.end_seconds:.1f}s"
+                    markdown_parts.append(f"## [{timestamp}]\n\n{result.text}\n")
+
+                markdown_text = "\n".join(markdown_parts)
+
+                # Calculate metadata
+                total_duration = sum(r.duration_seconds for r in results)
+                estimated_cost = (total_duration / 60) * WHISPER_COST_PER_MINUTE
+                successful_chunks = sum(1 for r in results if r.confidence > 0)
+
+                extraction_metadata = {
+                    "chunk_count": len(chunks),
+                    "transcribed_chunks": successful_chunks,
+                    "duration_seconds": total_duration,
+                    "estimated_cost": estimated_cost,
+                    "parser": "whisper_api",
+                }
+
+                logger.info(
+                    f"Transcription complete: {successful_chunks}/{len(chunks)} chunks, "
+                    f"${estimated_cost:.3f} cost"
+                )
+
+                return {
+                    "text": markdown_text,
+                    "metadata": extraction_metadata,
+                }
 
-        finally:
-            # Clean up temp file and chunks
-            try:
-                tmp_path.unlink(missing_ok=True)
-                if 'chunker' in locals() and 'chunks' in locals():
-                    chunker.cleanup_chunks(chunks)
             except Exception as e:
-                logger.warning(f"Cleanup failed: {e}")
+                logger.error(f"Audio extraction failed: {e}")
+                raise RuntimeError(f"Audio transcription failed: {e}") from e
+
+            finally:
+                # Clean up audio chunks (temp file cleanup handled by context manager)
+                if chunker is not None and chunks is not None:
+                    try:
+                        chunker.cleanup_chunks(chunks)
+                    except Exception as e:
+                        logger.warning(f"Chunk cleanup failed: {e}")
 
 
 class SchemaProvider(ContentProvider):
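The named constants introduced in this file come from the new `rem/utils/constants.py`, which is not shown in the diff. A sketch of the values implied by the literals they replace; the actual module may define more constants or differ.

```python
# Hedged sketch of rem/utils/constants.py (new file, not shown in this diff).
# Values inferred from the literals the constants replace in providers.py.
AUDIO_CHUNK_TARGET_SECONDS = 60.0   # was target_chunk_seconds=60.0
AUDIO_CHUNK_WINDOW_SECONDS = 2.0    # was chunk_window_seconds=2.0
SILENCE_THRESHOLD_DB = -40.0        # was silence_threshold_db=-40.0
MIN_SILENCE_MS = 500                # was min_silence_ms=500
SUBPROCESS_TIMEOUT_SECONDS = 300    # was timeout=300 (5 minute timeout)
WAV_HEADER_MIN_BYTES = 44           # was len(content) < 44
WHISPER_COST_PER_MINUTE = 0.006     # was (total_duration / 60) * 0.006
```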
@@ -667,19 +652,9 @@ class ImageProvider(ContentProvider):
 
             # Write bytes to temp file for analysis
             content_type = metadata.get("content_type", "image/png")
-            extension_map = {
-                "image/png": ".png",
-                "image/jpeg": ".jpg",
-                "image/gif": ".gif",
-                "image/webp": ".webp",
-            }
-            extension = extension_map.get(content_type, ".png")
-
-            with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as tmp:
-                tmp.write(content)
-                tmp_path = Path(tmp.name)
+            extension = get_extension(content_type, default=".png")
 
-            try:
+            with temp_file_from_bytes(content, suffix=extension) as tmp_path:
                 # Analyze image
                 result = analyzer.analyze_image(tmp_path)
                 vision_description = result.description
@@ -687,9 +662,6 @@ class ImageProvider(ContentProvider):
                 vision_model = result.model
 
                 logger.info(f"Vision analysis complete: {len(vision_description)} chars")
-            finally:
-                # Clean up temp file
-                tmp_path.unlink(missing_ok=True)
 
 
         except ImportError as e:
@@ -732,19 +704,9 @@ class ImageProvider(ContentProvider):
             if embedder.is_available():
                 # Write bytes to temp file for CLIP embedding
                 content_type = metadata.get("content_type", "image/png")
-                extension_map = {
-                    "image/png": ".png",
-                    "image/jpeg": ".jpg",
-                    "image/gif": ".gif",
-                    "image/webp": ".webp",
-                }
-                extension = extension_map.get(content_type, ".png")
+                extension = get_extension(content_type, default=".png")
 
-                with tempfile.NamedTemporaryFile(suffix=extension, delete=False) as tmp:
-                    tmp.write(content)
-                    tmp_path = Path(tmp.name)
-
-                try:
+                with temp_file_from_bytes(content, suffix=extension) as tmp_path:
                     # Generate CLIP embedding
                     result = embedder.embed_image(tmp_path)
                     if result:
@@ -754,9 +716,6 @@ class ImageProvider(ContentProvider):
                         logger.info(
                             f"CLIP embedding generated: {clip_dimensions} dims, {clip_tokens} tokens"
                        )
-                finally:
-                    # Clean up temp file
-                    tmp_path.unlink(missing_ok=True)
             else:
                 logger.debug(
                     "CLIP embeddings disabled - set CONTENT__JINA_API_KEY to enable. "
rem/services/content/service.py
@@ -278,6 +278,7 @@ class ContentService:
         category: str | None = None,
         tags: list[str] | None = None,
         is_local_server: bool = False,
+        resource_type: str | None = None,
     ) -> dict[str, Any]:
         """
         Complete file ingestion pipeline: read → store → parse → chunk → embed.
@@ -322,6 +323,9 @@ class ContentService:
             category: Optional category tag (document, code, audio, etc.)
             tags: Optional list of tags
             is_local_server: True if running as local/stdio MCP server
+            resource_type: Optional resource type (case-insensitive). Supports:
+                - "resource", "resources", "Resource" → Resource (default)
+                - "domain-resource", "domain_resource", "DomainResource" → DomainResource
 
         Returns:
             dict with:
@@ -366,11 +370,32 @@ class ContentService:
         file_size = len(file_content)
         logger.info(f"Read {file_size} bytes from {file_uri} (source: {source_type})")
 
-        # Step 2: Write to internal storage (user-scoped)
+        # Step 1.5: Early schema detection for YAML/JSON files
+        # Skip File entity creation for schemas (agents/evaluators)
+        file_suffix = Path(file_name).suffix.lower()
+        if file_suffix in ['.yaml', '.yml', '.json']:
+            import yaml
+            import json
+            try:
+                content_text = file_content.decode('utf-8') if isinstance(file_content, bytes) else file_content
+                data = yaml.safe_load(content_text) if file_suffix in ['.yaml', '.yml'] else json.loads(content_text)
+                if isinstance(data, dict):
+                    json_schema_extra = data.get('json_schema_extra', {})
+                    kind = json_schema_extra.get('kind', '')
+                    if kind in ['agent', 'evaluator']:
+                        # Route directly to schema processing, skip File entity
+                        logger.info(f"Detected {kind} schema: {file_name}, routing to _process_schema")
+                        result = self.process_uri(file_uri)
+                        return await self._process_schema(result, file_uri, user_id)
+            except Exception as e:
+                logger.debug(f"Early schema detection failed for {file_name}: {e}")
+                # Fall through to standard file processing
+
+        # Step 2: Write to internal storage (public or user-scoped)
         file_id = str(uuid4())
         storage_uri, internal_key, content_type, _ = await fs_service.write_to_internal_storage(
             content=file_content,
-            tenant_id=user_id,  # Using user_id for storage scoping
+            tenant_id=user_id or "public",  # Storage path: public/ or user_id/
             file_name=file_name,
             file_id=file_id,
         )
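For reference, the early-detection branch added above only routes an upload to `_process_schema` when the parsed YAML/JSON top level carries a `json_schema_extra.kind` of "agent" or "evaluator" (field names taken from the diff). An illustrative input:

```python
# Illustrative only: the minimal shape the early schema detection above matches.
import yaml

uploaded = """\
json_schema_extra:
  kind: agent
name: resource-affinity-assessor
"""

data = yaml.safe_load(uploaded)
kind = data.get("json_schema_extra", {}).get("kind", "")
assert kind in ["agent", "evaluator"]  # such a file skips File-entity creation
```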
@@ -379,7 +404,7 @@ class ContentService:
         # Step 3: Create File entity
         file_entity = File(
             id=file_id,
-            tenant_id=user_id,  # Set tenant_id to user_id (application scoped to user)
+            tenant_id=user_id,  # None = public/shared
             user_id=user_id,
             name=file_name,
             uri=storage_uri,
@@ -418,6 +443,7 @@ class ContentService:
             processing_result = await self.process_and_save(
                 uri=storage_uri,
                 user_id=user_id,
+                resource_type=resource_type,
             )
             processing_status = processing_result.get("status", "completed")
             resources_created = processing_result.get("chunk_count", 0)
@@ -459,7 +485,12 @@ class ContentService:
             "message": f"File ingested and {processing_status}. Created {resources_created} resources.",
         }
 
-    async def process_and_save(self, uri: str, user_id: str | None = None) -> dict[str, Any]:
+    async def process_and_save(
+        self,
+        uri: str,
+        user_id: str | None = None,
+        resource_type: str | None = None,
+    ) -> dict[str, Any]:
         """
         Process file end-to-end: extract → markdown → chunk → save.
 
@@ -474,6 +505,8 @@ class ContentService:
         Args:
             uri: File URI (s3://bucket/key or local path)
             user_id: Optional user ID for multi-tenancy
+            resource_type: Optional resource type (case-insensitive). Defaults to "Resource".
+                Supports: resource, domain-resource, domain_resource, DomainResource, etc.
 
         Returns:
             dict with file metadata and chunk count
@@ -526,7 +559,7 @@ class ContentService:
             size_bytes=result["metadata"].get("size"),
             mime_type=result["metadata"].get("content_type"),
             processing_status="completed",
-            tenant_id=user_id or "default",  # Required field
+            tenant_id=user_id,  # None = public/shared
             user_id=user_id,
         )
 
@@ -534,28 +567,66 @@ class ContentService:
         await self.file_repo.upsert(file)
         logger.info(f"Saved File: {filename}")
 
-        # Create Resource entities for each chunk
-        resources = [
-            Resource(
+        # Resolve resource model class from type parameter (case-insensitive)
+        from typing import cast, Type
+        from pydantic import BaseModel
+        from rem.utils.model_helpers import model_from_arbitrary_casing, get_table_name
+
+        resource_model: Type[BaseModel] = Resource  # Default
+        if resource_type:
+            try:
+                resource_model = model_from_arbitrary_casing(resource_type)
+                logger.info(f"Using resource model: {resource_model.__name__}")
+            except ValueError as e:
+                logger.warning(f"Invalid resource_type '{resource_type}', using default Resource: {e}")
+                resource_model = Resource
+
+        # Get table name for the resolved model
+        table_name = get_table_name(resource_model)
+
+        # Create resource entities for each chunk
+        resources: list[BaseModel] = [
+            resource_model(
                 name=f"{filename}#chunk-{i}",
                 uri=f"{uri}#chunk-{i}",
                 ordinal=i,
                 content=chunk,
                 category="document",
-                tenant_id=user_id or "default",  # Required field
+                tenant_id=user_id,  # None = public/shared
                 user_id=user_id,
             )
             for i, chunk in enumerate(chunks)
         ]
 
-        if self.resource_repo:
-            await self.resource_repo.upsert(
-                resources,
-                embeddable_fields=["content"],
-                generate_embeddings=True,
-            )
-            logger.info(f"Saved {len(resources)} Resource chunks")
-            logger.info(f"Queued {len(resources)} embedding generation tasks for content field")
+        # Save resources to the appropriate table
+        if resources:
+            from rem.services.postgres import get_postgres_service
+
+            postgres = get_postgres_service()
+            if postgres:
+                await postgres.connect()
+                try:
+                    await postgres.batch_upsert(
+                        records=cast(list[BaseModel | dict], resources),
+                        model=resource_model,
+                        table_name=table_name,
+                        entity_key_field="name",
+                        embeddable_fields=["content"],
+                        generate_embeddings=True,
+                    )
+                    logger.info(f"Saved {len(resources)} {resource_model.__name__} chunks to {table_name}")
+                    logger.info(f"Queued {len(resources)} embedding generation tasks for content field")
+                finally:
+                    await postgres.disconnect()
+            elif self.resource_repo:
+                # Fallback to injected repo (only works for default Resource)
+                await self.resource_repo.upsert(
+                    resources,
+                    embeddable_fields=["content"],
+                    generate_embeddings=True,
+                )
+                logger.info(f"Saved {len(resources)} Resource chunks")
+                logger.info(f"Queued {len(resources)} embedding generation tasks for content field")
 
         return {
             "file": file.model_dump(),
@@ -595,9 +666,10 @@ class ContentService:
         # IMPORTANT: category field distinguishes agents from evaluators
         # - kind=agent → category="agent" (AI agents with tools/resources)
         # - kind=evaluator → category="evaluator" (LLM-as-a-Judge evaluators)
+        # Schemas (agents/evaluators) default to system tenant for shared access
         schema_entity = Schema(
-            tenant_id=user_id or "default",
-            user_id=user_id,
+            tenant_id="system",
+            user_id=None,
             name=name,
             spec=schema_data,
             category=kind,  # Maps kind → category for database filtering
@@ -667,7 +739,7 @@ class ContentService:
         processor = EngramProcessor(postgres)
         result = await processor.process_engram(
             data=data,
-            tenant_id=user_id or "default",
+            tenant_id=user_id,  # None = public/shared
             user_id=user_id,
         )
         logger.info(f"✅ Engram processed: {result.get('resource_id')} with {len(result.get('moment_ids', []))} moments")
rem/services/dreaming/affinity_service.py
@@ -8,12 +8,11 @@ vector similarity (fast) or LLM analysis (intelligent).
 import json
 from datetime import datetime, timedelta
 from enum import Enum
-from pathlib import Path
 from typing import Any, Optional
 
-import yaml
 from loguru import logger
 
+from ...utils.schema_loader import load_agent_schema
 from ...agentic.providers.pydantic_ai import create_agent
 from ...agentic.serialization import serialize_agent_result
 from ...models.core import QueryType, RemQuery, SearchParameters
@@ -125,20 +124,7 @@ async def build_affinity(
     # Load LLM agent for relationship assessment if needed
     affinity_agent = None
     if mode == AffinityMode.LLM:
-        schema_path = (
-            Path(__file__).parent.parent.parent
-            / "schemas"
-            / "agents"
-            / "resource-affinity-assessor.yaml"
-        )
-
-        if not schema_path.exists():
-            raise FileNotFoundError(
-                f"ResourceAffinityAssessor schema not found: {schema_path}"
-            )
-
-        with open(schema_path) as f:
-            agent_schema = yaml.safe_load(f)
+        agent_schema = load_agent_schema("resource-affinity-assessor")
 
         affinity_agent_runtime = await create_agent(
             agent_schema_override=agent_schema,
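`load_agent_schema` replaces the hand-rolled path building and `yaml.safe_load` removed in this hunk and in the moment_service hunk below. The helper belongs to `rem/utils/schema_loader.py` (heavily expanded in this release but not shown here); a sketch of the behaviour both call sites rely on, with the search logic assumed:

```python
# Hedged sketch only - the real helper lives in rem/utils/schema_loader.py and is
# not part of this diff. Assumed behaviour, based on the code it replaces: find
# "<name>.yaml" under the packaged schemas/agents directory (including
# subdirectories such as core/) and return the parsed YAML, or raise.
from pathlib import Path
from typing import Any

import yaml


def load_agent_schema(name: str) -> dict[str, Any]:
    schemas_root = Path(__file__).parent.parent / "schemas" / "agents"
    matches = sorted(schemas_root.rglob(f"{name}.yaml"))
    if not matches:
        raise FileNotFoundError(f"Agent schema not found: {name}")
    with open(matches[0]) as f:
        return yaml.safe_load(f)
```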
rem/services/dreaming/moment_service.py
@@ -8,13 +8,12 @@ with temporal boundaries and metadata.
 
 import json
 from datetime import datetime, timedelta
-from pathlib import Path
 from typing import Any, Optional
 from uuid import uuid4
 
-import yaml
 from loguru import logger
 
+from ...utils.schema_loader import load_agent_schema
 from ...agentic.providers.pydantic_ai import create_agent
 from ...agentic.serialization import serialize_agent_result
 from ...models.entities.moment import Moment, Person
@@ -101,19 +100,7 @@ async def construct_moments(
     }
 
     # Load MomentBuilder agent schema
-    schema_path = (
-        Path(__file__).parent.parent.parent
-        / "schemas"
-        / "agents"
-        / "core"
-        / "moment-builder.yaml"
-    )
-
-    if not schema_path.exists():
-        raise FileNotFoundError(f"MomentBuilder schema not found: {schema_path}")
-
-    with open(schema_path) as f:
-        agent_schema = yaml.safe_load(f)
+    agent_schema = load_agent_schema("moment-builder")
 
     # Prepare input data for agent
     input_data = {