remdb 0.3.7__py3-none-any.whl → 0.3.14__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (43)
  1. rem/__init__.py +129 -2
  2. rem/agentic/context.py +7 -5
  3. rem/agentic/providers/phoenix.py +32 -43
  4. rem/api/README.md +23 -0
  5. rem/api/main.py +27 -2
  6. rem/api/middleware/tracking.py +172 -0
  7. rem/api/routers/auth.py +54 -0
  8. rem/api/routers/chat/completions.py +1 -1
  9. rem/cli/commands/ask.py +13 -10
  10. rem/cli/commands/configure.py +4 -3
  11. rem/cli/commands/db.py +17 -3
  12. rem/cli/commands/experiments.py +76 -72
  13. rem/cli/commands/process.py +8 -7
  14. rem/cli/commands/scaffold.py +47 -0
  15. rem/cli/main.py +2 -0
  16. rem/models/entities/user.py +10 -3
  17. rem/registry.py +367 -0
  18. rem/services/content/providers.py +92 -133
  19. rem/services/dreaming/affinity_service.py +2 -16
  20. rem/services/dreaming/moment_service.py +2 -15
  21. rem/services/embeddings/api.py +20 -13
  22. rem/services/phoenix/EXPERIMENT_DESIGN.md +3 -3
  23. rem/services/phoenix/client.py +148 -14
  24. rem/services/postgres/schema_generator.py +86 -5
  25. rem/services/rate_limit.py +113 -0
  26. rem/services/rem/README.md +14 -0
  27. rem/services/user_service.py +98 -0
  28. rem/settings.py +79 -10
  29. rem/sql/install_models.sql +13 -0
  30. rem/sql/migrations/003_seed_default_user.sql +48 -0
  31. rem/utils/constants.py +97 -0
  32. rem/utils/date_utils.py +228 -0
  33. rem/utils/embeddings.py +17 -4
  34. rem/utils/files.py +167 -0
  35. rem/utils/mime_types.py +158 -0
  36. rem/utils/schema_loader.py +63 -14
  37. rem/utils/vision.py +9 -14
  38. rem/workers/README.md +14 -14
  39. rem/workers/db_maintainer.py +74 -0
  40. {remdb-0.3.7.dist-info → remdb-0.3.14.dist-info}/METADATA +169 -121
  41. {remdb-0.3.7.dist-info → remdb-0.3.14.dist-info}/RECORD +43 -32
  42. {remdb-0.3.7.dist-info → remdb-0.3.14.dist-info}/WHEEL +0 -0
  43. {remdb-0.3.7.dist-info → remdb-0.3.14.dist-info}/entry_points.txt +0 -0
rem/cli/commands/ask.py CHANGED
@@ -89,8 +89,8 @@ async def run_agent_streaming(
  context: Optional AgentContext for session persistence
  max_iterations: Maximum iterations/requests (from agent schema or settings)
  """
- from datetime import datetime, timezone
  from pydantic_ai import UsageLimits
+ from rem.utils.date_utils import to_iso_with_z, utc_now

  logger.info("Running agent in streaming mode...")

@@ -151,13 +151,13 @@ async def run_agent_streaming(
  user_message = {
  "role": "user",
  "content": user_message_content,
- "timestamp": datetime.now(timezone.utc).isoformat(),
+ "timestamp": to_iso_with_z(utc_now()),
  }

  assistant_message = {
  "role": "assistant",
  "content": "".join(assistant_response_parts),
- "timestamp": datetime.now(timezone.utc).isoformat(),
+ "timestamp": to_iso_with_z(utc_now()),
  }

  # Store messages with compression
@@ -200,8 +200,8 @@ async def run_agent_non_streaming(
  Returns:
  Output data if successful, None otherwise
  """
- from datetime import datetime, timezone
  from pydantic_ai import UsageLimits
+ from rem.utils.date_utils import to_iso_with_z, utc_now

  logger.info("Running agent in non-streaming mode...")

@@ -248,13 +248,13 @@ async def run_agent_non_streaming(
  user_message = {
  "role": "user",
  "content": user_message_content,
- "timestamp": datetime.now(timezone.utc).isoformat(),
+ "timestamp": to_iso_with_z(utc_now()),
  }

  assistant_message = {
  "role": "assistant",
  "content": assistant_content,
- "timestamp": datetime.now(timezone.utc).isoformat(),
+ "timestamp": to_iso_with_z(utc_now()),
  }

  # Store messages with compression
@@ -357,8 +357,8 @@ async def _save_output_file(file_path: Path, data: dict[str, Any]) -> None:
  )
  @click.option(
  "--user-id",
- default="test-user",
- help="User ID for context (default: test-user)",
+ default=None,
+ help="User ID for context (default: from settings.test.effective_user_id)",
  )
  @click.option(
  "--session-id",
@@ -393,7 +393,7 @@ def ask(
  max_turns: int,
  version: str | None,
  stream: bool,
- user_id: str,
+ user_id: str | None,
  session_id: str | None,
  input_file: Path | None,
  output_file: Path | None,
@@ -434,6 +434,9 @@ def ask(
  # Two arguments provided
  name = name_or_query

+ # Resolve user_id from settings if not provided
+ effective_user_id = user_id or settings.test.effective_user_id
+
  asyncio.run(
  _ask_async(
  name=name,
@@ -443,7 +446,7 @@ def ask(
  max_turns=max_turns,
  version=version,
  stream=stream,
- user_id=user_id,
+ user_id=effective_user_id,
  session_id=session_id,
  input_file=input_file,
  output_file=output_file,
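Note: rem/utils/date_utils.py is new in this release (+228 lines, listed above) but its body is not part of this excerpt. A minimal sketch of what the two helpers used in ask.py plausibly do, inferred only from the call sites that replace datetime.now(timezone.utc).isoformat(); treat the behavior as an assumption, not the package's actual implementation:

    # Hypothetical sketch; the real code lives in rem/utils/date_utils.py.
    from datetime import datetime, timezone

    def utc_now() -> datetime:
        # Timezone-aware "now" in UTC.
        return datetime.now(timezone.utc)

    def to_iso_with_z(dt: datetime) -> str:
        # ISO 8601 string ending in "Z" rather than "+00:00".
        return dt.astimezone(timezone.utc).isoformat().replace("+00:00", "Z")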
rem/cli/commands/configure.py CHANGED
@@ -49,7 +49,7 @@ def prompt_postgres_config(use_defaults: bool = False) -> dict:

  # Default values
  host = "localhost"
- port = 5050
+ port = 5051
  database = "rem"
  username = "rem"
  password = "rem"
@@ -431,8 +431,9 @@ def configure_command(install: bool, claude_desktop: bool, show: bool, edit: boo
  if os.name == "nt": # Windows
  config_dir = Path.home() / "AppData/Roaming/Claude"
  elif os.name == "posix":
- if Path.home() / "Library/Application Support/Claude":
- config_dir = Path.home() / "Library/Application Support/Claude"
+ macos_path = Path.home() / "Library/Application Support/Claude"
+ if macos_path.exists():
+ config_dir = macos_path
  else:
  config_dir = Path.home() / ".config/Claude"
  else:
rem/cli/commands/db.py CHANGED
@@ -382,9 +382,9 @@ def rebuild_cache(connection: str | None):

  @click.command()
  @click.argument("file_path", type=click.Path(exists=True, path_type=Path))
- @click.option("--user-id", default="test-user", help="User ID for loaded data")
+ @click.option("--user-id", default=None, help="User ID for loaded data (default: from settings)")
  @click.option("--dry-run", is_flag=True, help="Show what would be loaded without loading")
- def load(file_path: Path, user_id: str, dry_run: bool):
+ def load(file_path: Path, user_id: str | None, dry_run: bool):
  """
  Load data from YAML file into database.

@@ -400,7 +400,11 @@ def load(file_path: Path, user_id: str, dry_run: bool):
  rem db load data.yaml --user-id my-user
  rem db load data.yaml --dry-run
  """
- asyncio.run(_load_async(file_path, user_id, dry_run))
+ from ...settings import settings
+
+ # Resolve user_id from settings if not provided
+ effective_user_id = user_id or settings.test.effective_user_id
+ asyncio.run(_load_async(file_path, effective_user_id, dry_run))


  async def _load_async(file_path: Path, user_id: str, dry_run: bool):
@@ -467,6 +471,16 @@ async def _load_async(file_path: Path, user_id: str, dry_run: bool):
  for edge in row_data["graph_edges"]
  ]

+ # Convert any ISO timestamp strings with Z suffix to naive datetime
+ # This handles fields like starts_timestamp, ends_timestamp, etc.
+ from ...utils.date_utils import parse_iso
+ for key, value in list(row_data.items()):
+ if isinstance(value, str) and (key.endswith("_timestamp") or key.endswith("_at")):
+ try:
+ row_data[key] = parse_iso(value)
+ except (ValueError, TypeError):
+ pass # Not a valid datetime string, leave as-is
+
  # Create model instance and upsert via repository
  from ...services.postgres.repository import Repository
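Note: the timestamp-normalization loop added above relies on parse_iso, also from the new rem/utils/date_utils.py module, whose implementation is not shown in this diff. A hedged sketch of the behavior the loop assumes (accept a trailing "Z" and return a naive datetime, per the comment in the added code):

    # Hypothetical sketch; actual behavior is defined in rem/utils/date_utils.py.
    from datetime import datetime

    def parse_iso(value: str) -> datetime:
        # Accept ISO 8601 strings, including a trailing "Z", and drop tzinfo
        # so the value fits a timestamp-without-timezone column.
        dt = datetime.fromisoformat(value.replace("Z", "+00:00"))
        return dt.replace(tzinfo=None)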
rem/cli/commands/experiments.py CHANGED
@@ -578,8 +578,7 @@ def run(
  from rem.services.git import GitService
  from rem.services.phoenix import PhoenixClient
  from rem.agentic.providers.phoenix import create_evaluator_from_schema
- from datetime import datetime
- import pandas as pd
+ from rem.utils.date_utils import utc_now, to_iso, format_timestamp_for_experiment
  import os

  try:
@@ -615,36 +614,22 @@ def run(
  click.echo(f" Mode: DRY RUN (no data will be saved)")
  click.echo()

- # Load agent schema from Git or filesystem
+ # Load agent schema using centralized schema loader
  agent_name = config.agent_schema_ref.name
  agent_version = config.agent_schema_ref.version

  click.echo(f"Loading agent schema: {agent_name} (version: {agent_version or 'latest'})")

- # Try Git first, fallback to filesystem
- agent_schema = None
- try:
- git_svc = GitService()
- agent_schema = git_svc.load_schema(agent_name, version=agent_version)
- click.echo(f"✓ Loaded agent schema from Git")
- except Exception as e:
- logger.debug(f"Git not available, trying filesystem: {e}")
+ from rem.utils.schema_loader import load_agent_schema

- # Fallback to local filesystem
- from rem.services.fs import FS
- fs = FS()
-
- schema_path = f"schemas/agents/{agent_name}.yaml"
- try:
- agent_schema = fs.read(schema_path)
- click.echo(f"✓ Loaded agent schema from filesystem")
- except Exception as fs_error:
- logger.error(f"Failed to load agent schema: Git: {e}, FS: {fs_error}")
- click.echo(f"Error: Could not load agent schema '{agent_name}'")
- click.echo(f" Tried Git: {e}")
- click.echo(f" Tried filesystem: {schema_path}")
- click.echo(f" Make sure the schema exists")
- raise click.Abort()
+ try:
+ agent_schema = load_agent_schema(agent_name)
+ click.echo(f"✓ Loaded agent schema: {agent_name}")
+ except FileNotFoundError as e:
+ logger.error(f"Failed to load agent schema: {e}")
+ click.echo(f"Error: Could not load agent schema '{agent_name}'")
+ click.echo(f" {e}")
+ raise click.Abort()

  # Create agent function from schema
  from rem.agentic.providers.pydantic_ai import create_agent
@@ -683,73 +668,85 @@ def run(
  return {"output": serialized}
  return serialized if isinstance(serialized, dict) else {"output": str(serialized)}

- # Load evaluator schema
+ # Load evaluator schema using centralized schema loader
  evaluator_name = config.evaluator_schema_ref.name
  evaluator_version = config.evaluator_schema_ref.version

- # Resolve evaluator path (evaluators are organized by agent name)
- evaluator_schema_path = f"rem/schemas/evaluators/{agent_name}/{evaluator_name}.yaml"
-
  click.echo(f"Loading evaluator: {evaluator_name} for agent {agent_name}")

- try:
- evaluator_fn = create_evaluator_from_schema(
- evaluator_schema_path=evaluator_schema_path,
- model_name=None, # Use default from schema
- )
- click.echo(f"✓ Loaded evaluator schema")
- except Exception as e:
- logger.warning(f"Failed to load evaluator: {e}")
- click.echo(f"Error: Could not load evaluator schema")
- click.echo(f" Path: {evaluator_schema_path}")
- click.echo(f" Make sure the schema exists")
+ # Try multiple evaluator path patterns (agent-specific, then generic)
+ evaluator_paths_to_try = [
+ f"{agent_name}/{evaluator_name}", # e.g., hello-world/default
+ f"{agent_name}-{evaluator_name}", # e.g., hello-world-default
+ evaluator_name, # e.g., default (generic)
+ ]
+
+ evaluator_fn = None
+ evaluator_load_error = None
+
+ for evaluator_path in evaluator_paths_to_try:
+ try:
+ evaluator_fn = create_evaluator_from_schema(
+ evaluator_schema_path=evaluator_path,
+ model_name=None, # Use default from schema
+ )
+ click.echo(f"✓ Loaded evaluator schema: {evaluator_path}")
+ break
+ except FileNotFoundError as e:
+ evaluator_load_error = e
+ logger.debug(f"Evaluator not found at {evaluator_path}: {e}")
+ continue
+ except Exception as e:
+ evaluator_load_error = e
+ logger.warning(f"Failed to load evaluator from {evaluator_path}: {e}")
+ continue
+
+ if evaluator_fn is None:
+ click.echo(f"Error: Could not load evaluator schema '{evaluator_name}'")
+ click.echo(f" Tried paths: {evaluator_paths_to_try}")
+ if evaluator_load_error:
+ click.echo(f" Last error: {evaluator_load_error}")
  raise click.Abort()

- # Load dataset
+ # Load dataset using Polars
+ import polars as pl
+
  click.echo(f"Loading dataset: {list(config.datasets.keys())[0]}")
  dataset_ref = list(config.datasets.values())[0]

  if dataset_ref.location.value == "git":
- # Load from Git
+ # Load from Git (local filesystem)
  dataset_path = Path(base_path) / name / dataset_ref.path
  if not dataset_path.exists():
  click.echo(f"Error: Dataset not found: {dataset_path}")
  raise click.Abort()

  if dataset_ref.format == "csv":
- dataset_df = pd.read_csv(dataset_path)
+ dataset_df = pl.read_csv(dataset_path)
  elif dataset_ref.format == "parquet":
- dataset_df = pd.read_parquet(dataset_path)
+ dataset_df = pl.read_parquet(dataset_path)
  elif dataset_ref.format == "jsonl":
- dataset_df = pd.read_json(dataset_path, lines=True)
+ dataset_df = pl.read_ndjson(dataset_path)
  else:
  click.echo(f"Error: Format '{dataset_ref.format}' not yet supported")
  raise click.Abort()
  elif dataset_ref.location.value in ["s3", "hybrid"]:
  # Load from S3 using FS provider
  from rem.services.fs import FS
+ from io import BytesIO

  fs = FS()

  try:
  if dataset_ref.format == "csv":
  content = fs.read(dataset_ref.path)
- from io import StringIO
- dataset_df = pd.read_csv(StringIO(content))
+ dataset_df = pl.read_csv(BytesIO(content.encode() if isinstance(content, str) else content))
  elif dataset_ref.format == "parquet":
- # For parquet, we need binary read
- import tempfile
- with tempfile.NamedTemporaryFile(suffix=".parquet", delete=False) as tmp:
- tmp_path = tmp.name
- # Download via FS
- content_bytes = fs.read(dataset_ref.path)
- tmp.write(content_bytes)
- dataset_df = pd.read_parquet(tmp_path)
- Path(tmp_path).unlink() # Clean up temp file
+ content_bytes = fs.read(dataset_ref.path)
+ dataset_df = pl.read_parquet(BytesIO(content_bytes if isinstance(content_bytes, bytes) else content_bytes.encode()))
  elif dataset_ref.format == "jsonl":
  content = fs.read(dataset_ref.path)
- from io import StringIO
- dataset_df = pd.read_json(StringIO(content), lines=True)
+ dataset_df = pl.read_ndjson(BytesIO(content.encode() if isinstance(content, str) else content))
  else:
  click.echo(f"Error: Format '{dataset_ref.format}' not yet supported")
  raise click.Abort()
@@ -793,13 +790,13 @@ def run(

  client = PhoenixClient(config=phoenix_config)

- experiment_name = f"{config.name}-{datetime.now().strftime('%Y%m%d-%H%M%S')}"
+ experiment_name = f"{config.name}-{format_timestamp_for_experiment()}"

  click.echo(f"\n⏳ Running experiment: {experiment_name}")
  click.echo(f" This may take several minutes...")

  experiment = client.run_experiment(
- dataset=dataset_df, # type: ignore[arg-type]
+ dataset=dataset_df,
  task=task_fn,
  evaluators=[evaluator_fn],
  experiment_name=experiment_name,
@@ -809,12 +806,15 @@ def run(
  "evaluator": config.evaluator_schema_ref.name,
  "experiment_config": config.name,
  **config.metadata
- }
+ },
+ # Smart column detection for DataFrame -> Phoenix Dataset conversion
+ input_keys=["input"] if "input" in dataset_df.columns else None,
+ output_keys=["expected_output"] if "expected_output" in dataset_df.columns else None,
  )

  # Update experiment status
  config.status = ExperimentStatus.COMPLETED
- config.last_run_at = datetime.now()
+ config.last_run_at = utc_now()
  if not version: # Only save if not loading from Git
  config.save(base_path)

@@ -835,7 +835,7 @@ def run(
  "agent": config.agent_schema_ref.name,
  "evaluator": config.evaluator_schema_ref.name,
  "dataset_size": len(dataset_df),
- "completed_at": datetime.now().isoformat(),
+ "completed_at": to_iso(utc_now()),
  "phoenix_url": getattr(experiment, "url", None),
  "task_runs": len(exp_data.get("task_runs", [])),
  }
@@ -1015,20 +1015,24 @@ def dataset_add(
  --output-keys expected_label,expected_type
  """
  from rem.services.phoenix import PhoenixClient
- import pandas as pd
+ import polars as pl

  try:
  client = PhoenixClient()

- # Load CSV
- df = pd.read_csv(from_csv)
+ # Load CSV with Polars
+ df = pl.read_csv(from_csv)
+ records = df.to_dicts()

  # Extract data
- inputs = cast(list[dict[str, Any]], df[input_keys.split(",")].to_dict("records"))
- outputs = cast(list[dict[str, Any]], df[output_keys.split(",")].to_dict("records"))
+ input_cols = input_keys.split(",")
+ output_cols = output_keys.split(",")
+ inputs = [{k: row.get(k) for k in input_cols} for row in records]
+ outputs = [{k: row.get(k) for k in output_cols} for row in records]
  metadata = None
  if metadata_keys:
- metadata = cast(list[dict[str, Any]], df[metadata_keys.split(",")].to_dict("records"))
+ meta_cols = metadata_keys.split(",")
+ metadata = [{k: row.get(k) for k in meta_cols} for row in records]

  # Add to dataset
  dataset = client.add_examples_to_dataset(
@@ -1269,12 +1273,12 @@ def trace_list(
  rem experiments trace list --project rem-agents --days 7 --limit 50
  """
  from rem.services.phoenix import PhoenixClient
- from datetime import datetime, timedelta
+ from rem.utils.date_utils import days_ago

  try:
  client = PhoenixClient()

- start_time = datetime.now() - timedelta(days=days)
+ start_time = days_ago(days)

  traces_df = client.get_traces(
  project_name=project,
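Note: the experiments command now loads datasets with Polars instead of pandas. Polars readers accept either a path or a bytes-like file object, which is why the S3 branch above wraps fs.read() output in BytesIO rather than using StringIO or a temporary parquet file. A small self-contained sketch of the pattern (the input/expected_output column names mirror the detection logic passed to run_experiment above):

    import polars as pl
    from io import BytesIO

    raw = b'{"input": "hi", "expected_output": "hello"}\n'  # bytes as returned by an object store
    df = pl.read_ndjson(BytesIO(raw))      # jsonl -> DataFrame
    records = df.to_dicts()                # list[dict] rows, as used by dataset_add
    has_input = "input" in df.columns      # column check behind input_keys=["input"]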
rem/cli/commands/process.py CHANGED
@@ -192,15 +192,13 @@ def process_uri(uri: str, output: str, save: str | None):


  @click.command(name="files")
- @click.option("--tenant-id", required=True, help="Tenant ID")
- @click.option("--user-id", help="Filter by user ID")
+ @click.option("--user-id", default=None, help="User ID (default: from settings)")
  @click.option("--status", type=click.Choice(["pending", "processing", "completed", "failed"]), help="Filter by status")
  @click.option("--extractor", help="Run files through custom extractor (e.g., cv-parser-v1)")
  @click.option("--limit", type=int, help="Max files to process")
  @click.option("--provider", help="Optional LLM provider override")
  @click.option("--model", help="Optional model override")
  def process_files(
- tenant_id: str,
  user_id: Optional[str],
  status: Optional[str],
  extractor: Optional[str],
@@ -217,19 +215,22 @@ def process_files(

  \b
  # List completed files
- rem process files --tenant-id acme-corp --status completed
+ rem process files --status completed

  \b
  # Extract from CV files
- rem process files --tenant-id acme-corp --extractor cv-parser-v1 --limit 10
+ rem process files --extractor cv-parser-v1 --limit 10

  \b
  # Extract with provider override
- rem process files --tenant-id acme-corp --extractor contract-analyzer-v1 \\
+ rem process files --extractor contract-analyzer-v1 \\
  --provider anthropic --model claude-sonnet-4-5
  """
+ from ...settings import settings
+ effective_user_id = user_id or settings.test.effective_user_id
+
  logger.warning("Not implemented yet")
- logger.info(f"Would process files for tenant: {tenant_id}")
+ logger.info(f"Would process files for user: {effective_user_id}")

  if user_id:
  logger.info(f"Filter: user_id={user_id}")
rem/cli/commands/scaffold.py ADDED
@@ -0,0 +1,47 @@
+ """
+ Scaffold command - generate project structure for REM-based applications.
+
+ TODO: Implement this command to generate:
+ - my_app/main.py (entry point with create_app)
+ - my_app/models.py (example CoreModel subclass)
+ - my_app/routers/ (example FastAPI router)
+ - schemas/agents/ (example agent schema)
+ - schemas/evaluators/ (example evaluator)
+ - sql/migrations/ (empty migrations directory)
+ - pyproject.toml (with remdb dependency)
+ - README.md (basic usage instructions)
+
+ Usage:
+ rem scaffold my-app
+ rem scaffold my-app --with-examples # Include example models/routers/tools
+ """
+
+ import click
+
+
+ @click.command()
+ @click.argument("name")
+ @click.option("--with-examples", is_flag=True, help="Include example code")
+ def scaffold(name: str, with_examples: bool) -> None:
+ """
+ Generate a new REM-based project structure.
+
+ NAME is the project directory name to create.
+ """
+ click.echo(f"TODO: Scaffold command not yet implemented")
+ click.echo(f"Would create project: {name}")
+ click.echo(f"With examples: {with_examples}")
+ click.echo()
+ click.echo("For now, manually create this structure:")
+ click.echo(f"""
+ {name}/
+ ├── {name.replace('-', '_')}/
+ │ ├── main.py # Entry point (create_app + extensions)
+ │ ├── models.py # Custom models (inherit CoreModel)
+ │ └── routers/ # Custom FastAPI routers
+ ├── schemas/
+ │ ├── agents/ # Custom agent YAML schemas
+ │ └── evaluators/ # Custom evaluator schemas
+ ├── sql/migrations/ # Custom SQL migrations
+ └── pyproject.toml
+ """)
rem/cli/main.py CHANGED
@@ -75,6 +75,7 @@ from .commands.experiments import experiments as experiments_group
  from .commands.configure import register_command as register_configure_command
  from .commands.serve import register_command as register_serve_command
  from .commands.mcp import register_command as register_mcp_command
+ from .commands.scaffold import scaffold as scaffold_command

  register_schema_commands(schema)
  register_db_commands(db)
@@ -85,6 +86,7 @@ register_configure_command(cli)
  register_serve_command(cli)
  register_mcp_command(cli)
  cli.add_command(experiments_group)
+ cli.add_command(scaffold_command)


  def main():
rem/models/entities/user.py CHANGED
@@ -22,9 +22,12 @@ from ..core import CoreModel
  class UserTier(str, Enum):
  """User subscription tier for feature gating."""

+ ANONYMOUS = "anonymous"
  FREE = "free"
- SILVER = "silver"
- GOLD = "gold"
+ BASIC = "basic"
+ PRO = "pro"
+ SILVER = "silver" # Deprecated? Keeping for backward compatibility if needed
+ GOLD = "gold" # Deprecated? Keeping for backward compatibility if needed


  class User(CoreModel):
@@ -57,7 +60,11 @@ class User(CoreModel):
  )
  tier: UserTier = Field(
  default=UserTier.FREE,
- description="User subscription tier (free, silver, gold) for feature gating",
+ description="User subscription tier (free, basic, pro) for feature gating",
+ )
+ anonymous_ids: list[str] = Field(
+ default_factory=list,
+ description="Linked anonymous session IDs used for merging history",
  )
  sec_policy: dict = Field(
  default_factory=dict,