remdb 0.3.172__py3-none-any.whl → 0.3.223__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- rem/agentic/README.md +262 -2
- rem/agentic/context.py +173 -0
- rem/agentic/context_builder.py +12 -2
- rem/agentic/mcp/tool_wrapper.py +39 -16
- rem/agentic/providers/pydantic_ai.py +46 -43
- rem/agentic/schema.py +2 -2
- rem/agentic/tools/rem_tools.py +11 -0
- rem/api/main.py +1 -1
- rem/api/mcp_router/resources.py +64 -8
- rem/api/mcp_router/server.py +31 -24
- rem/api/mcp_router/tools.py +621 -166
- rem/api/routers/admin.py +30 -4
- rem/api/routers/auth.py +114 -15
- rem/api/routers/chat/completions.py +66 -18
- rem/api/routers/chat/sse_events.py +7 -3
- rem/api/routers/chat/streaming.py +254 -22
- rem/api/routers/common.py +18 -0
- rem/api/routers/dev.py +7 -1
- rem/api/routers/feedback.py +9 -1
- rem/api/routers/messages.py +176 -38
- rem/api/routers/models.py +9 -1
- rem/api/routers/query.py +12 -1
- rem/api/routers/shared_sessions.py +16 -0
- rem/auth/jwt.py +19 -4
- rem/auth/middleware.py +42 -28
- rem/cli/README.md +62 -0
- rem/cli/commands/ask.py +1 -1
- rem/cli/commands/db.py +148 -70
- rem/cli/commands/process.py +171 -43
- rem/models/entities/ontology.py +91 -101
- rem/schemas/agents/rem.yaml +1 -1
- rem/services/content/service.py +18 -5
- rem/services/email/service.py +11 -2
- rem/services/embeddings/worker.py +26 -12
- rem/services/postgres/__init__.py +28 -3
- rem/services/postgres/diff_service.py +57 -5
- rem/services/postgres/programmable_diff_service.py +635 -0
- rem/services/postgres/pydantic_to_sqlalchemy.py +2 -2
- rem/services/postgres/register_type.py +12 -11
- rem/services/postgres/repository.py +46 -25
- rem/services/postgres/schema_generator.py +5 -5
- rem/services/postgres/sql_builder.py +6 -5
- rem/services/session/__init__.py +8 -1
- rem/services/session/compression.py +40 -2
- rem/services/session/pydantic_messages.py +276 -0
- rem/settings.py +28 -0
- rem/sql/background_indexes.sql +5 -0
- rem/sql/migrations/001_install.sql +157 -10
- rem/sql/migrations/002_install_models.sql +160 -132
- rem/sql/migrations/004_cache_system.sql +7 -275
- rem/sql/migrations/migrate_session_id_to_uuid.sql +45 -0
- rem/utils/model_helpers.py +101 -0
- rem/utils/schema_loader.py +6 -6
- {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/METADATA +1 -1
- {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/RECORD +57 -53
- {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/WHEEL +0 -0
- {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/entry_points.txt +0 -0
rem/cli/README.md
CHANGED

@@ -434,6 +434,68 @@ Ensure you're using the correct model format:
 - OpenAI: `openai:gpt-4o-mini`, `openai:gpt-4o`
 - Anthropic: `anthropic:claude-sonnet-4-5-20250929`
 
+## Data Visibility: PUBLIC vs PRIVATE
+
+**IMPORTANT: All ingested data is PUBLIC by default.** This is the correct behavior
+for shared knowledge bases (ontologies, procedures, reference data).
+
+### Why PUBLIC by Default?
+
+Most data in REM should be searchable by all users:
+- Clinical ontologies (disorders, symptoms, drugs)
+- Procedures and protocols (SCID-5, PHQ-9, etc.)
+- Reference documentation
+- Shared domain knowledge
+
+The `rem_lookup()` function searches for data where `user_id IS NULL`, which means
+public data. If you set `user_id` on data, it becomes invisible to other users.
+
+### Ingesting Public Data (Default)
+
+```bash
+# Standard ingestion - data is PUBLIC
+rem process ingest ontology/procedures/ --table ontologies
+
+# From S3 - also PUBLIC
+rem process ingest s3://bucket/docs/reference.pdf
+```
+
+### Ingesting Private Data (Rare)
+
+Private data requires explicit `--make-private` flag:
+
+```bash
+# Private user data - requires --make-private and --user-id
+rem process ingest personal-notes.md --make-private --user-id user-123
+```
+
+**When to use private data:**
+- User-uploaded personal documents
+- Session-specific content
+- User notes and annotations
+
+**NEVER use private data for:**
+- Ontologies and reference material
+- Clinical procedures and protocols
+- Shared knowledge bases
+- Anything that should be searchable by agents
+
+### Common Mistake
+
+If agents can't find data via `search_rem`, the most common cause is that the data
+was ingested with a `user_id` set. Check with:
+
+```sql
+SELECT name, user_id FROM ontologies WHERE name = 'phq-9-procedure';
+-- user_id should be NULL for public data
+```
+
+Fix by setting user_id to NULL:
+```sql
+UPDATE ontologies SET user_id = NULL WHERE user_id IS NOT NULL;
+UPDATE kv_store SET user_id = NULL WHERE entity_type = 'ontologies' AND user_id IS NOT NULL;
+```
+
 ## Next Steps
 
 1. **Implement Schema Registry**
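The visibility rule this README section documents is easy to audit from outside the CLI. A minimal sketch, assuming direct database access with `asyncpg` and the `ontologies`/`kv_store` tables named above (the DSN and helper function are illustrative, not part of remdb's API):

```python
import asyncio
import asyncpg  # assumed driver; remdb normally talks to Postgres via its own service layer

async def audit_visibility(dsn: str) -> None:
    """Count rows that a public rem_lookup() would never see (user_id set)."""
    conn = await asyncpg.connect(dsn)
    try:
        for table in ("ontologies", "kv_store"):
            hidden = await conn.fetchval(
                f"SELECT count(*) FROM {table} WHERE user_id IS NOT NULL"
            )
            print(f"{table}: {hidden} private row(s) hidden from public lookup")
    finally:
        await conn.close()

asyncio.run(audit_visibility("postgresql://localhost/rem"))  # hypothetical DSN
```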
rem/cli/commands/ask.py
CHANGED

@@ -75,7 +75,7 @@ async def run_agent_streaming(
     """
     Run agent in streaming mode using agent.iter() with usage limits.
 
-    Design Pattern
+    Design Pattern:
     - Use agent.iter() for complete execution with tool call visibility
     - run_stream() stops after first output, missing tool calls
     - Stream tool call markers: [Calling: tool_name]
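The docstring's distinction is the crux of this change: `run_stream()` ends the run at the first final output, while `agent.iter()` walks every node of the run graph, so tool calls can be surfaced as `[Calling: tool_name]` markers. A minimal sketch of that pattern against pydantic-ai's public API (the model name and limit values are illustrative, and `result.output` assumes a recent pydantic-ai release):

```python
from pydantic_ai import Agent
from pydantic_ai.messages import ToolCallPart
from pydantic_ai.usage import UsageLimits

agent = Agent("openai:gpt-4o-mini")  # illustrative model

async def run_with_tool_markers(prompt: str) -> str:
    async with agent.iter(prompt, usage_limits=UsageLimits(request_limit=10)) as run:
        async for node in run:
            # A CallToolsNode carries the model response that requested tools,
            # which run_stream() would never surface.
            if Agent.is_call_tools_node(node):
                for part in node.model_response.parts:
                    if isinstance(part, ToolCallPart):
                        print(f"[Calling: {part.tool_name}]")
        return run.result.output
```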
rem/cli/commands/db.py
CHANGED

@@ -333,64 +333,120 @@ def rebuild_cache(connection: str | None):
 
 @click.command()
 @click.argument("file_path", type=click.Path(exists=True, path_type=Path))
+@click.option("--table", "-t", default=None, help="Target table name (required for non-YAML formats)")
 @click.option("--user-id", default=None, help="User ID to scope data privately (default: public/shared)")
 @click.option("--dry-run", is_flag=True, help="Show what would be loaded without loading")
-def load(file_path: Path, user_id: str | None, dry_run: bool):
+def load(file_path: Path, table: str | None, user_id: str | None, dry_run: bool):
     """
-    Load data from
+    Load data from file into database.
 
-
-
-    key_field: name
-    rows:
-      - name: Example
-        content: Test data...
+    Supports YAML with embedded metadata, or any tabular format via Polars
+    (jsonl, parquet, csv, json, arrow, etc.). For non-YAML formats, use --table.
 
     Examples:
-        rem db load
-        rem db load data.
-        rem db load data.yaml --dry-run
+        rem db load data.yaml                  # YAML with metadata
+        rem db load data.jsonl -t resources    # Any Polars-supported format
     """
-    asyncio.run(_load_async(file_path, user_id, dry_run))
+    asyncio.run(_load_async(file_path, table, user_id, dry_run))
 
 
-
+def _load_dataframe_from_file(file_path: Path) -> "pl.DataFrame":
+    """Load any Polars-supported file format into a DataFrame."""
+    import polars as pl
+
+    suffix = file_path.suffix.lower()
+
+    if suffix in {".jsonl", ".ndjson"}:
+        return pl.read_ndjson(file_path)
+    elif suffix in {".parquet", ".pq"}:
+        return pl.read_parquet(file_path)
+    elif suffix == ".csv":
+        return pl.read_csv(file_path)
+    elif suffix == ".json":
+        return pl.read_json(file_path)
+    elif suffix in {".ipc", ".arrow"}:
+        return pl.read_ipc(file_path)
+    else:
+        raise ValueError(f"Unsupported file format: {suffix}. Use any Polars-supported format.")
+
+
+async def _load_async(file_path: Path, table: str | None, user_id: str | None, dry_run: bool):
     """Async implementation of load command."""
+    import polars as pl
     import yaml
     from ...models.core.inline_edge import InlineEdge
-    from ...models.entities import
+    from ...models.entities import SharedSession
     from ...services.postgres import get_postgres_service
+    from ...utils.model_helpers import get_table_name
+    from ... import get_model_registry
 
     logger.info(f"Loading data from: {file_path}")
    scope_msg = f"user: {user_id}" if user_id else "public"
     logger.info(f"Scope: {scope_msg}")
 
-
-        data = yaml.safe_load(f)
-
-    if not isinstance(data, list):
-        logger.error("YAML must be a list of table definitions")
-        raise click.Abort()
-
-    if dry_run:
-        logger.info("DRY RUN - Would load:")
-        logger.info(yaml.dump(data, default_flow_style=False))
-        return
+    suffix = file_path.suffix.lower()
+    is_yaml = suffix in {".yaml", ".yml"}
 
-    #
-
+    # Build MODEL_MAP dynamically from registry
+    registry = get_model_registry()
+    registry.register_core_models()
     MODEL_MAP = {
-
-
-        "resources": Resource,
-        "messages": Message,
-        "schemas": Schema,
+        get_table_name(model): model
+        for model in registry.get_model_classes().values()
     }
 
     # Non-CoreModel tables that need direct SQL insertion
     DIRECT_INSERT_TABLES = {"shared_sessions"}
 
+    # Parse file based on format
+    if is_yaml:
+        # YAML with embedded metadata
+        with open(file_path) as f:
+            data = yaml.safe_load(f)
+
+        if not isinstance(data, list):
+            logger.error("YAML must be a list of table definitions")
+            raise click.Abort()
+
+        if dry_run:
+            logger.info("DRY RUN - Would load:")
+            logger.info(yaml.dump(data, default_flow_style=False))
+            return
+
+        table_defs = data
+    else:
+        # Polars-supported format - require --table
+        if not table:
+            logger.error(f"For {suffix} files, --table is required. Example: rem db load {file_path.name} -t resources")
+            raise click.Abort()
+
+        try:
+            df = _load_dataframe_from_file(file_path)
+        except Exception as e:
+            logger.error(f"Failed to load file: {e}")
+            raise click.Abort()
+
+        rows = df.to_dicts()
+
+        if dry_run:
+            logger.info(f"DRY RUN - Would load {len(rows)} rows to table '{table}':")
+            logger.info(f"Columns: {list(df.columns)}")
+
+            # Validate first row against model if table is known
+            if table in MODEL_MAP and rows:
+                from ...utils.model_helpers import validate_data_for_model
+                result = validate_data_for_model(MODEL_MAP[table], rows[0])
+                if result.extra_fields:
+                    logger.warning(f"Unknown fields (ignored): {result.extra_fields}")
+                if result.valid:
+                    logger.success(f"Sample row validates OK. Required: {result.required_fields or '(none)'}")
+                else:
+                    result.log_errors("Sample row")
+            return
+
+        # Wrap as single table definition
+        table_defs = [{"table": table, "rows": rows}]
+
     # Connect to database
     pg = get_postgres_service()
     if not pg:

@@ -399,23 +455,23 @@ async def _load_async(file_path: Path, user_id: str | None, dry_run: bool):
 
     await pg.connect()
 
+    # Start embedding worker for generating embeddings
+    if pg.embedding_worker:
+        await pg.embedding_worker.start()
+
     try:
         total_loaded = 0
 
-        for table_def in
+        for table_def in table_defs:
             table_name = table_def["table"]
-            key_field = table_def.get("key_field", "id")
             rows = table_def.get("rows", [])
 
             # Handle direct insert tables (non-CoreModel)
             if table_name in DIRECT_INSERT_TABLES:
                 for row_data in rows:
-                    #
-                    if "tenant_id" not in row_data:
-                        row_data["tenant_id"] = "default"
+                    # tenant_id is optional - NULL means public/shared
 
                     if table_name == "shared_sessions":
-                        # Insert shared_session directly
                         await pg.fetch(
                             """INSERT INTO shared_sessions
                             (session_id, owner_user_id, shared_with_user_id, tenant_id)

@@ -424,7 +480,7 @@ async def _load_async(file_path: Path, user_id: str | None, dry_run: bool):
                             row_data["session_id"],
                             row_data["owner_user_id"],
                             row_data["shared_with_user_id"],
-                            row_data
+                            row_data.get("tenant_id"),  # Optional - NULL means public
                         )
                         total_loaded += 1
                         logger.success(f"Loaded shared_session: {row_data['owner_user_id']} -> {row_data['shared_with_user_id']}")

@@ -434,16 +490,11 @@ async def _load_async(file_path: Path, user_id: str | None, dry_run: bool):
                 logger.warning(f"Unknown table: {table_name}, skipping")
                 continue
 
-            model_class = MODEL_MAP[table_name]
+            model_class = MODEL_MAP[table_name]
 
-            for row_data in rows:
-                #
-                #
-                # Pass --user-id to scope data privately to a specific user
-                if "user_id" not in row_data and user_id is not None:
-                    row_data["user_id"] = user_id
-                if "tenant_id" not in row_data and user_id is not None:
-                    row_data["tenant_id"] = row_data.get("user_id", user_id)
+            for row_idx, row_data in enumerate(rows):
+                # tenant_id and user_id are optional - NULL means public/shared data
+                # Data files can explicitly set tenant_id/user_id if needed
 
                 # Convert graph_edges to InlineEdge format if present
                 if "graph_edges" in row_data:

@@ -452,30 +503,40 @@ async def _load_async(file_path: Path, user_id: str | None, dry_run: bool):
                         for edge in row_data["graph_edges"]
                     ]
 
-                # Convert
-                # This handles fields like starts_timestamp, ends_timestamp, etc.
+                # Convert ISO timestamp strings
                 from ...utils.date_utils import parse_iso
                 for key, value in list(row_data.items()):
                     if isinstance(value, str) and (key.endswith("_timestamp") or key.endswith("_at")):
                         try:
                             row_data[key] = parse_iso(value)
                         except (ValueError, TypeError):
-                            pass
+                            pass
 
-                # Create model instance and upsert via repository
                 from ...services.postgres.repository import Repository
+                from ...utils.model_helpers import validate_data_for_model
 
-
-
-
+                result = validate_data_for_model(model_class, row_data)
+                if not result.valid:
+                    result.log_errors(f"Row {row_idx + 1} ({table_name})")
+                    raise click.Abort()
+
+                repo = Repository(model_class, table_name, pg)
+                await repo.upsert(result.instance)  # type: ignore[arg-type]
                 total_loaded += 1
 
-
-                name = getattr(instance, 'name', getattr(instance, 'id', '?'))
+                name = getattr(result.instance, 'name', getattr(result.instance, 'id', '?'))
                 logger.success(f"Loaded {table_name[:-1]}: {name}")
 
         logger.success(f"Data loaded successfully! Total rows: {total_loaded}")
 
+        # Wait for embeddings to complete
+        if pg.embedding_worker and pg.embedding_worker.running:
+            queue_size = pg.embedding_worker.task_queue.qsize()
+            if queue_size > 0:
+                logger.info(f"Waiting for {queue_size} embeddings to complete...")
+                await pg.embedding_worker.stop()
+                logger.success("Embeddings generated successfully")
+
     finally:
         await pg.disconnect()
 

@@ -580,7 +641,7 @@ async def _diff_async(
 
     if not result.has_changes:
         click.secho("✓ No schema drift detected", fg="green")
-        click.echo("  Database matches
+        click.echo("  Database matches source (tables, functions, triggers, views)")
         if result.filtered_count > 0:
             click.echo()
             click.secho(f"  ({result.filtered_count} destructive change(s) hidden by '{strategy}' strategy)", fg="yellow")

@@ -592,17 +653,34 @@ async def _diff_async(
     if result.filtered_count > 0:
         click.secho(f"  ({result.filtered_count} destructive change(s) hidden by '{strategy}' strategy)", fg="yellow")
         click.echo()
-
-
-
-
-
-
-
-
-
-
-
+
+    # Table/column changes (Alembic)
+    if result.summary:
+        click.echo("Table Changes:")
+        for line in result.summary:
+            if line.startswith("+"):
+                click.secho(f"  {line}", fg="green")
+            elif line.startswith("-"):
+                click.secho(f"  {line}", fg="red")
+            elif line.startswith("~"):
+                click.secho(f"  {line}", fg="yellow")
+            else:
+                click.echo(f"  {line}")
+        click.echo()
+
+    # Programmable object changes (functions, triggers, views)
+    if result.programmable_summary:
+        click.echo("Programmable Objects (functions/triggers/views):")
+        for line in result.programmable_summary:
+            if line.startswith("+"):
+                click.secho(f"  {line}", fg="green")
+            elif line.startswith("-"):
+                click.secho(f"  {line}", fg="red")
+            elif line.startswith("~"):
+                click.secho(f"  {line}", fg="yellow")
+            else:
+                click.echo(f"  {line}")
+        click.echo()
 
     # Generate migration if requested
     if generate:
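The new non-YAML path in `_load_async` reduces every supported format to the same shape: a list of row dicts wrapped in a single table definition. A small sketch of that round trip with Polars (the file name and columns are made up for illustration; validation and upsert then proceed exactly as in the YAML path):

```python
import polars as pl

rows = [
    {"name": "example-resource", "content": "Test data...", "tags": ["demo"]},
    {"name": "another-resource", "content": "More test data", "tags": []},
]
pl.DataFrame(rows).write_ndjson("resources.jsonl")

df = pl.read_ndjson("resources.jsonl")  # same reader _load_dataframe_from_file picks for .jsonl
print(df.columns)                       # the column list --dry-run reports
table_defs = [{"table": "resources", "rows": df.to_dicts()}]  # shape fed to the upsert loop
```

Loading it then becomes `rem db load resources.jsonl -t resources` (add `--dry-run` first to get the sample-row validation).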
rem/cli/commands/process.py
CHANGED

@@ -11,39 +11,102 @@ from rem.services.content import ContentService
 
 
 @click.command(name="ingest")
-@click.argument("
-@click.option("--
+@click.argument("path", type=click.Path(exists=True))
+@click.option("--table", "-t", default=None, help="Target table (e.g., ontologies, resources). Auto-detected for schemas.")
+@click.option("--make-private", is_flag=True, help="Make data private to a specific user. RARELY NEEDED - most data should be public/shared.")
+@click.option("--user-id", default=None, help="User ID for private data. REQUIRES --make-private flag.")
 @click.option("--category", help="Optional file category")
 @click.option("--tags", help="Optional comma-separated tags")
+@click.option("--pattern", "-p", default="**/*.md", help="Glob pattern for directory ingestion (default: **/*.md)")
+@click.option("--dry-run", is_flag=True, help="Show what would be ingested without making changes")
 def process_ingest(
-
+    path: str,
+    table: str | None,
+    make_private: bool,
     user_id: str | None,
     category: str | None,
     tags: str | None,
+    pattern: str,
+    dry_run: bool,
 ):
     """
-    Ingest
+    Ingest files into REM (storage + parsing + embedding).
 
-
-
-
-
-
-
+    Supports both single files and directories. For directories, recursively
+    processes files matching the pattern (default: **/*.md).
+
+    **IMPORTANT: Data is PUBLIC by default.** This is the correct behavior for
+    shared knowledge bases (ontologies, procedures, reference data). Private
+    user-scoped data is rarely needed and requires explicit --make-private flag.
+
+    Target table is auto-detected for schemas (agent.yaml → schemas table).
+    Use --table to explicitly set the target (e.g., ontologies for clinical knowledge).
 
     Examples:
         rem process ingest sample.pdf
         rem process ingest contract.docx --category legal --tags contract,2023
         rem process ingest agent.yaml  # Auto-detects kind=agent, saves to schemas table
+
+        # Directory ingestion into ontologies table (PUBLIC - no user-id needed)
+        rem process ingest ontology/procedures/scid-5/ --table ontologies
+        rem process ingest ontology/ --table ontologies --pattern "**/*.md"
+
+        # Preview what would be ingested
+        rem process ingest ontology/ --table ontologies --dry-run
+
+        # RARE: Private user-scoped data (requires --make-private)
+        rem process ingest private-notes.md --make-private --user-id user-123
     """
     import asyncio
+
+    # Validate: user_id requires --make-private flag
+    if user_id and not make_private:
+        raise click.UsageError(
+            "Setting --user-id requires the --make-private flag.\n\n"
+            "Data should be PUBLIC by default (no user-id). Private user-scoped data\n"
+            "is rarely needed - only use --make-private for truly personal content.\n\n"
+            "Example: rem process ingest file.md --make-private --user-id user-123"
+        )
+
+    # If --make-private is set, user_id is required
+    if make_private and not user_id:
+        raise click.UsageError(
+            "--make-private requires --user-id to specify which user owns the data.\n\n"
+            "Example: rem process ingest file.md --make-private --user-id user-123"
+        )
+
+    # Clear user_id if not making private (ensure None for public data)
+    effective_user_id = user_id if make_private else None
+    from pathlib import Path
     from ...services.content import ContentService
 
     async def _ingest():
-        # Initialize ContentService with repositories for proper resource saving
         from rem.services.postgres import get_postgres_service
         from rem.services.postgres.repository import Repository
-        from rem.models.entities import File, Resource
+        from rem.models.entities import File, Resource, Ontology
+
+        input_path = Path(path)
+        tag_list = tags.split(",") if tags else None
+
+        # Collect files to process
+        if input_path.is_dir():
+            files_to_process = list(input_path.glob(pattern))
+            if not files_to_process:
+                logger.error(f"No files matching '{pattern}' found in {input_path}")
+                sys.exit(1)
+            logger.info(f"Found {len(files_to_process)} files matching '{pattern}'")
+        else:
+            files_to_process = [input_path]
+
+        # Dry run: just show what would be processed
+        if dry_run:
+            logger.info("DRY RUN - Would ingest:")
+            for f in files_to_process[:20]:
+                entity_key = f.stem  # filename without extension
+                logger.info(f"  {f} → {table or 'auto-detect'} (key: {entity_key})")
+            if len(files_to_process) > 20:
+                logger.info(f"  ... and {len(files_to_process) - 20} more files")
+            return
 
         db = get_postgres_service()
         if not db:

@@ -51,53 +114,118 @@ def process_ingest(
         await db.connect()
 
         try:
-
-
-
-
-
-
-
-
-
-
-                user_id=user_id,
-                category=category,
-                tags=tag_list,
-                is_local_server=True,  # CLI is local
-            )
-
-            # Handle schema ingestion (agents/evaluators)
-            if result.get("schema_name"):
-                logger.success(f"Schema ingested: {result['schema_name']} (kind={result.get('kind', 'agent')})")
-                logger.info(f"Version: {result.get('version', '1.0.0')}")
-            # Handle file ingestion
-            elif result.get("processing_status") == "completed":
-                logger.success(f"File ingested: {result['file_name']}")
-                logger.info(f"File ID: {result['file_id']}")
-                logger.info(f"Resources created: {result['resources_created']}")
+            # Direct table ingestion (ontologies, etc.)
+            if table:
+                await _ingest_to_table(
+                    db=db,
+                    files=files_to_process,
+                    table_name=table,
+                    user_id=effective_user_id,
+                    category=category,
+                    tag_list=tag_list,
+                )
             else:
-
-
+                # Standard file ingestion via ContentService
+                file_repo = Repository(File, "files", db=db)
+                resource_repo = Repository(Resource, "resources", db=db)
+                service = ContentService(file_repo=file_repo, resource_repo=resource_repo)
+
+                for file_path in files_to_process:
+                    scope_msg = f"user: {effective_user_id}" if effective_user_id else "public"
+                    logger.info(f"Ingesting: {file_path} ({scope_msg})")
+
+                    result = await service.ingest_file(
+                        file_uri=str(file_path),
+                        user_id=effective_user_id,
+                        category=category,
+                        tags=tag_list,
+                        is_local_server=True,
+                    )
+
+                    # Handle schema ingestion (agents/evaluators)
+                    if result.get("schema_name"):
+                        logger.success(f"Schema: {result['schema_name']} (kind={result.get('kind', 'agent')})")
+                    elif result.get("processing_status") == "completed":
+                        logger.success(f"File: {result['file_name']} ({result['resources_created']} resources)")
+                    else:
+                        logger.error(f"Failed: {result.get('message', 'Unknown error')}")
 
         except Exception as e:
             logger.error(f"Error during ingestion: {e}")
             sys.exit(1)
         finally:
-            # Wait for
+            # Wait for embedding worker to finish
             from rem.services.embeddings.worker import get_global_embedding_worker
             try:
                 worker = get_global_embedding_worker()
                 if worker and worker.running and not worker.task_queue.empty():
-                    logger.info(f"Waiting for {worker.task_queue.qsize()} embedding tasks
-                    # Worker.stop() waits for queue to drain (see worker.py line ~148)
+                    logger.info(f"Waiting for {worker.task_queue.qsize()} embedding tasks...")
                     await worker.stop()
             except RuntimeError:
-                # Worker doesn't exist yet - no tasks queued
                 pass
 
             await db.disconnect()
 
+    async def _ingest_to_table(db, files, table_name, user_id, category, tag_list):
+        """Direct ingestion of files to a specific table (ontologies, etc.)."""
+        from rem.services.postgres.repository import Repository
+        from rem import get_model_registry
+        from rem.utils.model_helpers import get_table_name
+
+        # Get model class for table
+        registry = get_model_registry()
+        registry.register_core_models()
+        model_class = None
+        for model in registry.get_model_classes().values():
+            if get_table_name(model) == table_name:
+                model_class = model
+                break
+
+        if not model_class:
+            logger.error(f"Unknown table: {table_name}")
+            sys.exit(1)
+
+        repo = Repository(model_class, table_name, db=db)
+        processed = 0
+        failed = 0
+
+        for file_path in files:
+            try:
+                # Read file content
+                content = file_path.read_text(encoding="utf-8")
+                entity_key = file_path.stem  # filename without extension
+
+                # Build entity based on table
+                entity_data = {
+                    "name": entity_key,
+                    "content": content,
+                    "tags": tag_list or [],
+                }
+
+                # Add optional fields
+                if category:
+                    entity_data["category"] = category
+
+                # Scoping: user_id for private data, "public" for shared
+                # tenant_id="public" is the default for shared knowledge bases
+                entity_data["tenant_id"] = user_id or "public"
+                entity_data["user_id"] = user_id  # None = public/shared
+
+                # For ontologies, add URI
+                if table_name == "ontologies":
+                    entity_data["uri"] = f"file://{file_path.absolute()}"
+
+                entity = model_class(**entity_data)
+                await repo.upsert(entity, embeddable_fields=["content"], generate_embeddings=True)
+                processed += 1
+                logger.success(f"  ✓ {entity_key}")
+
+            except Exception as e:
+                failed += 1
+                logger.error(f"  ✗ {file_path.name}: {e}")
+
+        logger.info(f"Completed: {processed} succeeded, {failed} failed")
+
     asyncio.run(_ingest())
 
 def register_commands(group: click.Group):
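The paired-flag validation at the top of `process_ingest` is a reusable click pattern: two options that are only valid together, rejected with a `UsageError` before any I/O happens. Distilled into a standalone sketch (the command name and messages are hypothetical; remdb wires the real checks into `rem process ingest`):

```python
import click

@click.command()
@click.argument("path", type=click.Path(exists=True))
@click.option("--make-private", is_flag=True)
@click.option("--user-id", default=None)
def ingest(path: str, make_private: bool, user_id: str | None):
    # Reject each flag without its partner before touching the filesystem.
    if user_id and not make_private:
        raise click.UsageError("Setting --user-id requires the --make-private flag.")
    if make_private and not user_id:
        raise click.UsageError("--make-private requires --user-id.")
    effective_user_id = user_id if make_private else None  # None = public/shared
    scope = f"user: {effective_user_id}" if effective_user_id else "public"
    click.echo(f"Would ingest {path} ({scope})")

if __name__ == "__main__":
    ingest()
```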