remdb 0.3.7__py3-none-any.whl → 0.3.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43) hide show
  1. rem/__init__.py +129 -2
  2. rem/agentic/context.py +7 -5
  3. rem/agentic/providers/phoenix.py +32 -43
  4. rem/api/README.md +23 -0
  5. rem/api/main.py +27 -2
  6. rem/api/middleware/tracking.py +172 -0
  7. rem/api/routers/auth.py +54 -0
  8. rem/api/routers/chat/completions.py +1 -1
  9. rem/cli/commands/ask.py +13 -10
  10. rem/cli/commands/configure.py +4 -3
  11. rem/cli/commands/db.py +17 -3
  12. rem/cli/commands/experiments.py +76 -72
  13. rem/cli/commands/process.py +8 -7
  14. rem/cli/commands/scaffold.py +47 -0
  15. rem/cli/main.py +2 -0
  16. rem/models/entities/user.py +10 -3
  17. rem/registry.py +367 -0
  18. rem/services/content/providers.py +92 -133
  19. rem/services/dreaming/affinity_service.py +2 -16
  20. rem/services/dreaming/moment_service.py +2 -15
  21. rem/services/embeddings/api.py +20 -13
  22. rem/services/phoenix/EXPERIMENT_DESIGN.md +3 -3
  23. rem/services/phoenix/client.py +148 -14
  24. rem/services/postgres/schema_generator.py +86 -5
  25. rem/services/rate_limit.py +113 -0
  26. rem/services/rem/README.md +14 -0
  27. rem/services/user_service.py +98 -0
  28. rem/settings.py +79 -10
  29. rem/sql/install_models.sql +13 -0
  30. rem/sql/migrations/003_seed_default_user.sql +48 -0
  31. rem/utils/constants.py +97 -0
  32. rem/utils/date_utils.py +228 -0
  33. rem/utils/embeddings.py +17 -4
  34. rem/utils/files.py +167 -0
  35. rem/utils/mime_types.py +158 -0
  36. rem/utils/schema_loader.py +63 -14
  37. rem/utils/vision.py +9 -14
  38. rem/workers/README.md +14 -14
  39. rem/workers/db_maintainer.py +74 -0
  40. {remdb-0.3.7.dist-info → remdb-0.3.14.dist-info}/METADATA +169 -121
  41. {remdb-0.3.7.dist-info → remdb-0.3.14.dist-info}/RECORD +43 -32
  42. {remdb-0.3.7.dist-info → remdb-0.3.14.dist-info}/WHEEL +0 -0
  43. {remdb-0.3.7.dist-info → remdb-0.3.14.dist-info}/entry_points.txt +0 -0
@@ -0,0 +1,74 @@
1
+ """
2
+ Database Maintainer Worker.
3
+
4
+ Handles background maintenance tasks for PostgreSQL:
5
+ 1. Cleaning up expired rate limit counters (UNLOGGED table).
6
+ 2. Refreshing materialized views (if any).
7
+ 3. Vacuuming specific tables (if needed).
8
+
9
+ Usage:
10
+ python -m rem.workers.db_maintainer
11
+
12
+ # Or via docker-compose:
13
+ # command: python -m rem.workers.db_maintainer
14
+ """
15
+
16
+ import asyncio
17
+ import signal
18
+ from loguru import logger
19
+
20
+ from ..services.postgres.service import PostgresService
21
+ from ..services.rate_limit import RateLimitService
22
+
23
class DatabaseMaintainer:
    """Background worker that performs periodic PostgreSQL maintenance.

    Tasks per cycle:
        1. Clean up expired rate limit counters (UNLOGGED table).
        2. (Future) Refresh materialized views.
    """

    # Seconds between maintenance cycles.
    CYCLE_INTERVAL_SECONDS: int = 300

    def __init__(self):
        self.running = False
        # Event used so stop() interrupts the inter-cycle wait immediately;
        # a plain sleep(300) would delay shutdown by up to 5 minutes after
        # SIGTERM/SIGINT.
        self._stop_event = asyncio.Event()
        self.db = PostgresService()
        self.rate_limiter = RateLimitService(self.db)

    async def start(self):
        """Run the maintenance loop until stop() is called.

        Connects to the database, runs one maintenance cycle every
        ``CYCLE_INTERVAL_SECONDS``, and always disconnects on exit
        (including on cancellation or unexpected errors).
        """
        self.running = True
        logger.info("Starting Database Maintainer Worker")

        await self.db.connect()

        try:
            while self.running:
                await self._run_maintenance_cycle()
                # Wait for the next cycle, waking early if stop() fires.
                try:
                    await asyncio.wait_for(
                        self._stop_event.wait(),
                        timeout=self.CYCLE_INTERVAL_SECONDS,
                    )
                except asyncio.TimeoutError:
                    pass  # Normal path: interval elapsed, run next cycle.
        finally:
            await self.db.disconnect()

    async def _run_maintenance_cycle(self):
        """Execute one round of maintenance tasks, logging any failure.

        Errors are caught broadly on purpose: a transient DB hiccup must
        not kill the long-running worker; operators see it in the logs.
        """
        logger.debug("Running maintenance cycle...")

        try:
            # 1. Cleanup expired rate limit counters.
            await self.rate_limiter.cleanup_expired()

            # 2. (Future) Refresh Views
            # await self.db.execute("REFRESH MATERIALIZED VIEW ...")

        except Exception as e:
            logger.error(f"Maintenance cycle failed: {e}")

    def stop(self):
        """Request a graceful shutdown (safe to call from a signal handler)."""
        self.running = False
        self._stop_event.set()
        logger.info("Stopping Database Maintainer Worker...")
62
+
63
async def main():
    """Entry point: run the maintainer until a termination signal arrives."""
    worker = DatabaseMaintainer()

    # Translate SIGTERM/SIGINT into a graceful stop request on the worker.
    running_loop = asyncio.get_running_loop()
    for signum in (signal.SIGTERM, signal.SIGINT):
        running_loop.add_signal_handler(signum, worker.stop)

    await worker.start()


if __name__ == "__main__":
    asyncio.run(main())
@@ -1,6 +1,6 @@
1
1
  Metadata-Version: 2.4
2
2
  Name: remdb
3
- Version: 0.3.7
3
+ Version: 0.3.14
4
4
  Summary: Resources Entities Moments - Bio-inspired memory system for agentic AI workloads
5
5
  Project-URL: Homepage, https://github.com/Percolation-Labs/reminiscent
6
6
  Project-URL: Documentation, https://github.com/Percolation-Labs/reminiscent/blob/main/README.md
@@ -23,11 +23,10 @@ Requires-Dist: click>=8.1.0
23
23
  Requires-Dist: fastapi>=0.115.0
24
24
  Requires-Dist: fastmcp>=0.5.0
25
25
  Requires-Dist: gitpython>=3.1.45
26
- Requires-Dist: gmft
27
26
  Requires-Dist: hypercorn>=0.17.0
28
27
  Requires-Dist: itsdangerous>=2.0.0
29
28
  Requires-Dist: json-schema-to-pydantic>=0.2.0
30
- Requires-Dist: kreuzberg>=3.21.0
29
+ Requires-Dist: kreuzberg<4.0.0,>=3.21.0
31
30
  Requires-Dist: loguru>=0.7.0
32
31
  Requires-Dist: openinference-instrumentation-pydantic-ai>=0.1.0
33
32
  Requires-Dist: opentelemetry-api>=1.28.0
@@ -48,7 +47,6 @@ Requires-Dist: requests>=2.32.0
48
47
  Requires-Dist: semchunk>=2.2.0
49
48
  Requires-Dist: tenacity>=9.0.0
50
49
  Requires-Dist: tiktoken>=0.5.0
51
- Requires-Dist: torch>=2.0.0
52
50
  Requires-Dist: uvicorn[standard]>=0.32.0
53
51
  Provides-Extra: all
54
52
  Requires-Dist: ipdb>=0.13.0; extra == 'all'
@@ -137,59 +135,50 @@ Choose your path:
137
135
  **Best for**: First-time users who want to explore REM with curated example datasets.
138
136
 
139
137
  ```bash
140
- # Install system dependencies
141
- # macOS:
142
- brew install tesseract
143
-
144
- # Linux:
145
- sudo apt-get install tesseract-ocr
138
+ # Install system dependencies (tesseract for OCR)
139
+ brew install tesseract # macOS (Linux/Windows: see tesseract-ocr.github.io)
146
140
 
147
141
  # Install remdb
148
- pip install remdb[all]
142
+ pip install "remdb[all]"
149
143
 
150
144
  # Clone example datasets
151
145
  git clone https://github.com/Percolation-Labs/remstack-lab.git
152
146
  cd remstack-lab
153
147
 
154
- # Configure REM (interactive wizard)
155
- rem configure --install
148
+ # Start PostgreSQL with docker-compose
149
+ curl -O https://gist.githubusercontent.com/percolating-sirsh/d117b673bc0edfdef1a5068ccd3cf3e5/raw/docker-compose.prebuilt.yml
150
+ docker compose -f docker-compose.prebuilt.yml up -d postgres
156
151
 
157
- # Start PostgreSQL
158
- docker run -d \
159
- --name rem-postgres \
160
- -e POSTGRES_USER=rem \
161
- -e POSTGRES_PASSWORD=rem \
162
- -e POSTGRES_DB=rem \
163
- -p 5050:5432 \
164
- pgvector/pgvector:pg18
152
+ # Configure REM (creates ~/.rem/config.yaml and installs database schema)
153
+ # Add --claude-desktop to register with Claude Desktop app
154
+ rem configure --install --claude-desktop
165
155
 
166
- # Load quickstart dataset
167
- rem db load datasets/quickstart/sample_data.yaml --user-id demo-user
156
+ # Load quickstart dataset (uses default user)
157
+ rem db load datasets/quickstart/sample_data.yaml
168
158
 
169
159
  # Optional: Set default LLM provider via environment variable
170
160
  # export LLM__DEFAULT_MODEL="openai:gpt-4.1-nano" # Fast and cheap
171
161
  # export LLM__DEFAULT_MODEL="anthropic:claude-sonnet-4-5-20250929" # High quality (default)
172
162
 
173
163
  # Ask questions
174
- rem ask --user-id demo-user "What documents exist in the system?"
175
- rem ask --user-id demo-user "Show me meetings about API design"
164
+ rem ask "What documents exist in the system?"
165
+ rem ask "Show me meetings about API design"
176
166
 
177
167
  # Ingest files (PDF, DOCX, images, etc.) - note: requires remstack-lab
178
- rem process ingest datasets/formats/files/bitcoin_whitepaper.pdf --user-id demo-user --category research --tags bitcoin,whitepaper
168
+ rem process ingest datasets/formats/files/bitcoin_whitepaper.pdf --category research --tags bitcoin,whitepaper
179
169
 
180
170
  # Query ingested content
181
- rem ask --user-id demo-user "What is the Bitcoin whitepaper about?"
171
+ rem ask "What is the Bitcoin whitepaper about?"
182
172
 
183
- # Try other datasets
184
- rem db load --file datasets/domains/recruitment/scenarios/candidate_pipeline/data.yaml --user-id my-company
185
- rem ask --user-id my-company "Show me candidates with Python experience"
173
+ # Try other datasets (use --user-id for multi-tenant scenarios)
174
+ rem db load datasets/domains/recruitment/scenarios/candidate_pipeline/data.yaml --user-id acme-corp
175
+ rem ask --user-id acme-corp "Show me candidates with Python experience"
186
176
  ```
187
177
 
188
178
  **What you get:**
189
179
  - Quickstart: 3 users, 3 resources, 3 moments, 4 messages
190
180
  - Domain datasets: recruitment, legal, enterprise, misc
191
181
  - Format examples: engrams, documents, conversations, files
192
- - Jupyter notebooks and experiments
193
182
 
194
183
  **Learn more**: [remstack-lab repository](https://github.com/Percolation-Labs/remstack-lab)
195
184
 
@@ -270,28 +259,28 @@ Configuration saved to `~/.rem/config.yaml` (can edit with `rem configure --edit
270
259
  # Clone datasets repository
271
260
  git clone https://github.com/Percolation-Labs/remstack-lab.git
272
261
 
273
- # Load quickstart dataset
274
- rem db load --file remstack-lab/datasets/quickstart/sample_data.yaml --user-id demo-user
262
+ # Load quickstart dataset (uses default user)
263
+ rem db load --file remstack-lab/datasets/quickstart/sample_data.yaml
275
264
 
276
265
  # Test with sample queries
277
- rem ask --user-id demo-user "What documents exist in the system?"
278
- rem ask --user-id demo-user "Show me meetings about API design"
279
- rem ask --user-id demo-user "Who is Sarah Chen?"
266
+ rem ask "What documents exist in the system?"
267
+ rem ask "Show me meetings about API design"
268
+ rem ask "Who is Sarah Chen?"
280
269
 
281
- # Try domain-specific datasets
282
- rem db load --file remstack-lab/datasets/domains/recruitment/scenarios/candidate_pipeline/data.yaml --user-id my-company
283
- rem ask --user-id my-company "Show me candidates with Python experience"
270
+ # Try domain-specific datasets (use --user-id for multi-tenant scenarios)
271
+ rem db load --file remstack-lab/datasets/domains/recruitment/scenarios/candidate_pipeline/data.yaml --user-id acme-corp
272
+ rem ask --user-id acme-corp "Show me candidates with Python experience"
284
273
  ```
285
274
 
286
275
  **Option B: Bring your own data**
287
276
 
288
277
  ```bash
289
- # Ingest your own files
278
+ # Ingest your own files (uses default user)
290
279
  echo "REM is a bio-inspired memory system for agentic AI workloads." > test-doc.txt
291
- rem process ingest test-doc.txt --user-id test-user --category documentation --tags rem,ai
280
+ rem process ingest test-doc.txt --category documentation --tags rem,ai
292
281
 
293
282
  # Query your ingested data
294
- rem ask --user-id test-user "What do you know about REM from my knowledge base?"
283
+ rem ask "What do you know about REM from my knowledge base?"
295
284
  ```
296
285
 
297
286
  ### Step 4: Test the API
@@ -328,13 +317,13 @@ curl -X POST http://localhost:8000/api/v1/chat/completions \
328
317
  ```bash
329
318
  cd remstack-lab
330
319
 
331
- # Load any dataset
332
- rem db load --file datasets/quickstart/sample_data.yaml --user-id demo-user
320
+ # Load any dataset (uses default user)
321
+ rem db load --file datasets/quickstart/sample_data.yaml
333
322
 
334
323
  # Explore formats
335
- rem db load --file datasets/formats/engrams/scenarios/team_meeting/team_standup_meeting.yaml --user-id demo-user
324
+ rem db load --file datasets/formats/engrams/scenarios/team_meeting/team_standup_meeting.yaml
336
325
 
337
- # Try domain-specific examples
326
+ # Try domain-specific examples (use --user-id for multi-tenant scenarios)
338
327
  rem db load --file datasets/domains/recruitment/scenarios/candidate_pipeline/data.yaml --user-id acme-corp
339
328
  ```
340
329
 
@@ -429,30 +418,24 @@ json_schema_extra:
429
418
  ```bash
430
419
  # Ingest the schema (stores in database schemas table)
431
420
  rem process ingest my-research-assistant.yaml \
432
- --user-id my-user \
433
421
  --category agents \
434
422
  --tags custom,research
435
423
 
436
424
  # Verify schema is in database (should show schema details)
437
- rem ask "LOOKUP 'my-research-assistant' FROM schemas" --user-id my-user
425
+ rem ask "LOOKUP 'my-research-assistant' FROM schemas"
438
426
  ```
439
427
 
440
428
  **Step 3: Use Your Custom Agent**
441
429
 
442
430
  ```bash
443
431
  # Run a query with your custom agent
444
- rem ask research-assistant "Find documents about machine learning architecture" \
445
- --user-id my-user
432
+ rem ask research-assistant "Find documents about machine learning architecture"
446
433
 
447
434
  # With streaming
448
- rem ask research-assistant "Summarize recent API design documents" \
449
- --user-id my-user \
450
- --stream
435
+ rem ask research-assistant "Summarize recent API design documents" --stream
451
436
 
452
437
  # With session continuity
453
- rem ask research-assistant "What did we discuss about ML?" \
454
- --user-id my-user \
455
- --session-id abc-123
438
+ rem ask research-assistant "What did we discuss about ML?" --session-id abc-123
456
439
  ```
457
440
 
458
441
  ### Agent Schema Structure
@@ -523,10 +506,10 @@ Custom agents can also be used as **ontology extractors** to extract structured
523
506
  **Schema not found error:**
524
507
  ```bash
525
508
  # Check if schema was ingested correctly
526
- rem ask "SEARCH 'my-agent' FROM schemas" --user-id my-user
509
+ rem ask "SEARCH 'my-agent' FROM schemas"
527
510
 
528
- # List all schemas for your user
529
- rem ask "SELECT name, category, created_at FROM schemas ORDER BY created_at DESC LIMIT 10" --user-id my-user
511
+ # List all schemas
512
+ rem ask "SELECT name, category, created_at FROM schemas ORDER BY created_at DESC LIMIT 10"
530
513
  ```
531
514
 
532
515
  **Agent not loading tools:**
@@ -959,22 +942,14 @@ rem db schema validate --models src/rem/models/entities
959
942
  Process files with optional custom extractor (ontology extraction).
960
943
 
961
944
  ```bash
962
- # Process all completed files for tenant
963
- rem process files \
964
- --tenant-id acme-corp \
965
- --status completed \
966
- --limit 10
945
+ # Process all completed files
946
+ rem process files --status completed --limit 10
967
947
 
968
948
  # Process with custom extractor
969
- rem process files \
970
- --tenant-id acme-corp \
971
- --extractor cv-parser-v1 \
972
- --limit 50
949
+ rem process files --extractor cv-parser-v1 --limit 50
973
950
 
974
- # Process files from the last 7 days
975
- rem process files \
976
- --tenant-id acme-corp \
977
- --lookback-hours 168
951
+ # Process files for specific user
952
+ rem process files --user-id user-123 --status completed
978
953
  ```
979
954
 
980
955
  #### `rem process ingest` - Ingest File into REM
@@ -982,14 +957,13 @@ rem process files \
982
957
  Ingest a file into REM with full pipeline (storage + parsing + embedding + database).
983
958
 
984
959
  ```bash
985
- # Ingest local file
960
+ # Ingest local file with metadata
986
961
  rem process ingest /path/to/document.pdf \
987
- --user-id user-123 \
988
962
  --category legal \
989
963
  --tags contract,2024
990
964
 
991
965
  # Ingest with minimal options
992
- rem process ingest ./meeting-notes.md --user-id user-123
966
+ rem process ingest ./meeting-notes.md
993
967
  ```
994
968
 
995
969
  #### `rem process uri` - Parse File (Read-Only)
@@ -1014,28 +988,17 @@ rem process uri s3://bucket/key.docx --output text
1014
988
  Run full dreaming workflow: extractors → moments → affinity → user model.
1015
989
 
1016
990
  ```bash
1017
- # Full workflow for user
1018
- rem dreaming full \
1019
- --user-id user-123 \
1020
- --tenant-id acme-corp
991
+ # Full workflow (uses default user from settings)
992
+ rem dreaming full
1021
993
 
1022
994
  # Skip ontology extractors
1023
- rem dreaming full \
1024
- --user-id user-123 \
1025
- --tenant-id acme-corp \
1026
- --skip-extractors
995
+ rem dreaming full --skip-extractors
1027
996
 
1028
997
  # Process last 24 hours only
1029
- rem dreaming full \
1030
- --user-id user-123 \
1031
- --tenant-id acme-corp \
1032
- --lookback-hours 24
998
+ rem dreaming full --lookback-hours 24
1033
999
 
1034
- # Limit resources processed
1035
- rem dreaming full \
1036
- --user-id user-123 \
1037
- --tenant-id acme-corp \
1038
- --limit 100
1000
+ # Limit resources processed for specific user
1001
+ rem dreaming full --user-id user-123 --limit 100
1039
1002
  ```
1040
1003
 
1041
1004
  #### `rem dreaming custom` - Custom Extractor
@@ -1043,16 +1006,11 @@ rem dreaming full \
1043
1006
  Run specific ontology extractor on user's data.
1044
1007
 
1045
1008
  ```bash
1046
- # Run CV parser on user's files
1047
- rem dreaming custom \
1048
- --user-id user-123 \
1049
- --tenant-id acme-corp \
1050
- --extractor cv-parser-v1
1009
+ # Run CV parser on files
1010
+ rem dreaming custom --extractor cv-parser-v1
1051
1011
 
1052
- # Process last week's files
1012
+ # Process last week's files with limit
1053
1013
  rem dreaming custom \
1054
- --user-id user-123 \
1055
- --tenant-id acme-corp \
1056
1014
  --extractor contract-analyzer-v1 \
1057
1015
  --lookback-hours 168 \
1058
1016
  --limit 50
@@ -1063,17 +1021,11 @@ rem dreaming custom \
1063
1021
  Extract temporal narratives from resources.
1064
1022
 
1065
1023
  ```bash
1066
- # Generate moments for user
1067
- rem dreaming moments \
1068
- --user-id user-123 \
1069
- --tenant-id acme-corp \
1070
- --limit 50
1024
+ # Generate moments
1025
+ rem dreaming moments --limit 50
1071
1026
 
1072
1027
  # Process last 7 days
1073
- rem dreaming moments \
1074
- --user-id user-123 \
1075
- --tenant-id acme-corp \
1076
- --lookback-hours 168
1028
+ rem dreaming moments --lookback-hours 168
1077
1029
  ```
1078
1030
 
1079
1031
  #### `rem dreaming affinity` - Build Relationships
@@ -1081,17 +1033,11 @@ rem dreaming moments \
1081
1033
  Build semantic relationships between resources using embeddings.
1082
1034
 
1083
1035
  ```bash
1084
- # Build affinity graph for user
1085
- rem dreaming affinity \
1086
- --user-id user-123 \
1087
- --tenant-id acme-corp \
1088
- --limit 100
1036
+ # Build affinity graph
1037
+ rem dreaming affinity --limit 100
1089
1038
 
1090
1039
  # Process recent resources only
1091
- rem dreaming affinity \
1092
- --user-id user-123 \
1093
- --tenant-id acme-corp \
1094
- --lookback-hours 24
1040
+ rem dreaming affinity --lookback-hours 24
1095
1041
  ```
1096
1042
 
1097
1043
  #### `rem dreaming user-model` - Update User Model
@@ -1100,9 +1046,7 @@ Update user model from recent activity (preferences, interests, patterns).
1100
1046
 
1101
1047
  ```bash
1102
1048
  # Update user model
1103
- rem dreaming user-model \
1104
- --user-id user-123 \
1105
- --tenant-id acme-corp
1049
+ rem dreaming user-model
1106
1050
  ```
1107
1051
 
1108
1052
  ### Evaluation & Experiments
@@ -1468,6 +1412,110 @@ TraverseQuery ::= TRAVERSE [<edge_types:list>] WITH <initial_query:Query> [DEPTH
1468
1412
 
1469
1413
  **Stage 4** (100% answerable): Mature graph with rich historical data. All query types fully functional with high-quality results.
1470
1414
 
1415
+ ## Troubleshooting
1416
+
1417
+ ### Apple Silicon Mac: "Failed to build kreuzberg" Error
1418
+
1419
+ **Problem**: Installation fails with `ERROR: Failed building wheel for kreuzberg` on Apple Silicon Macs.
1420
+
1421
+ **Root Cause**: REM depends on `kreuzberg` for document parsing with native ONNX/Rust table extraction (this release pins `kreuzberg>=3.21.0,<4.0.0`; some earlier 0.3.x releases used `4.0.0rc1`). Recent kreuzberg releases provide pre-built wheels for ARM64 macOS (`macosx_14_0_arm64.whl`) but NOT for x86_64 (Intel) macOS. If you're using an x86_64 Python binary (running under Rosetta 2), pip cannot find a compatible wheel and attempts to build from source, which fails.
1422
+
1423
+ **Solution**: Use ARM64 (native) Python instead of x86_64 Python.
1424
+
1425
+ **Step 1: Verify your Python architecture**
1426
+
1427
+ ```bash
1428
+ python3 -c "import platform; print(f'Machine: {platform.machine()}')"
1429
+ ```
1430
+
1431
+ - **Correct**: `Machine: arm64` (native ARM Python)
1432
+ - **Wrong**: `Machine: x86_64` (Intel Python under Rosetta)
1433
+
1434
+ **Step 2: Install ARM Python via Homebrew** (if not already installed)
1435
+
1436
+ ```bash
1437
+ # Install ARM Python
1438
+ brew install python@3.12
1439
+
1440
+ # Verify it's ARM
1441
+ /opt/homebrew/bin/python3.12 -c "import platform; print(platform.machine())"
1442
+ # Should output: arm64
1443
+ ```
1444
+
1445
+ **Step 3: Create venv with ARM Python**
1446
+
1447
+ ```bash
1448
+ # Use full path to ARM Python
1449
+ /opt/homebrew/bin/python3.12 -m venv .venv
1450
+
1451
+ # Activate and install
1452
+ source .venv/bin/activate
1453
+ pip install "remdb[all]"
1454
+ ```
1455
+
1456
+ **Why This Happens**: Some users have both Intel Homebrew (`/usr/local`) and ARM Homebrew (`/opt/homebrew`) installed. If your system `python3` points to the Intel version at `/usr/local/bin/python3`, you'll hit this issue. The fix is to explicitly use the ARM Python from `/opt/homebrew/bin/python3.12`.
1457
+
1458
+ **Verification**: After successful installation, you should see:
1459
+ ```
1460
+ Using cached kreuzberg-4.0.0rc1-cp310-abi3-macosx_14_0_arm64.whl (19.8 MB)
1461
+ Successfully installed ... kreuzberg-4.0.0rc1 ... remdb-0.3.10
1462
+ ```
1463
+
1464
+ ## Using REM as a Library
1465
+
1466
+ REM wraps FastAPI - extend it exactly as you would any FastAPI app.
1467
+
1468
+ ```python
1469
+ import rem
1470
+ from rem import create_app
1471
+ from rem.models.core import CoreModel
1472
+
1473
+ # 1. Register models (for schema generation)
1474
+ rem.register_models(MyModel, AnotherModel)
1475
+
1476
+ # 2. Register schema paths (for custom agents/evaluators)
1477
+ rem.register_schema_path("./schemas")
1478
+
1479
+ # 3. Create app
1480
+ app = create_app()
1481
+
1482
+ # 4. Extend like normal FastAPI
1483
+ app.include_router(my_router)
1484
+
1485
+ @app.mcp_server.tool()
1486
+ async def my_tool(query: str) -> dict:
1487
+ """Custom MCP tool."""
1488
+ return {"result": query}
1489
+ ```
1490
+
1491
+ ### Project Structure
1492
+
1493
+ ```
1494
+ my-rem-app/
1495
+ ├── my_app/
1496
+ │ ├── main.py # Entry point (create_app + extensions)
1497
+ │ ├── models.py # Custom models (inherit CoreModel)
1498
+ │ └── routers/ # Custom FastAPI routers
1499
+ ├── schemas/
1500
+ │ ├── agents/ # Custom agent YAML schemas
1501
+ │ └── evaluators/ # Custom evaluator schemas
1502
+ ├── sql/migrations/ # Custom SQL migrations
1503
+ └── pyproject.toml
1504
+ ```
1505
+
1506
+ Generate this structure with: `rem scaffold my-app`
1507
+
1508
+ ### Extension Points
1509
+
1510
+ | Extension | How |
1511
+ |-----------|-----|
1512
+ | **Routes** | `app.include_router(router)` or `@app.get()` |
1513
+ | **MCP Tools** | `@app.mcp_server.tool()` decorator or `app.mcp_server.add_tool(fn)` |
1514
+ | **MCP Resources** | `@app.mcp_server.resource("uri://...")` or `app.mcp_server.add_resource(fn)` |
1515
+ | **MCP Prompts** | `@app.mcp_server.prompt()` or `app.mcp_server.add_prompt(fn)` |
1516
+ | **Models** | `rem.register_models(Model)` then `rem db schema generate` |
1517
+ | **Agent Schemas** | `rem.register_schema_path("./schemas")` or `SCHEMA__PATHS` env var |
1518
+
1471
1519
  ## License
1472
1520
 
1473
1521
  MIT