remdb 0.3.180-py3-none-any.whl → 0.3.258-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. rem/agentic/README.md +36 -2
  2. rem/agentic/__init__.py +10 -1
  3. rem/agentic/context.py +185 -1
  4. rem/agentic/context_builder.py +56 -35
  5. rem/agentic/mcp/tool_wrapper.py +2 -2
  6. rem/agentic/providers/pydantic_ai.py +303 -111
  7. rem/agentic/schema.py +2 -2
  8. rem/api/main.py +1 -1
  9. rem/api/mcp_router/resources.py +223 -0
  10. rem/api/mcp_router/server.py +4 -0
  11. rem/api/mcp_router/tools.py +608 -166
  12. rem/api/routers/admin.py +30 -4
  13. rem/api/routers/auth.py +219 -20
  14. rem/api/routers/chat/child_streaming.py +393 -0
  15. rem/api/routers/chat/completions.py +77 -40
  16. rem/api/routers/chat/sse_events.py +7 -3
  17. rem/api/routers/chat/streaming.py +381 -291
  18. rem/api/routers/chat/streaming_utils.py +325 -0
  19. rem/api/routers/common.py +18 -0
  20. rem/api/routers/dev.py +7 -1
  21. rem/api/routers/feedback.py +11 -3
  22. rem/api/routers/messages.py +176 -38
  23. rem/api/routers/models.py +9 -1
  24. rem/api/routers/query.py +17 -15
  25. rem/api/routers/shared_sessions.py +16 -0
  26. rem/auth/jwt.py +19 -4
  27. rem/auth/middleware.py +42 -28
  28. rem/cli/README.md +62 -0
  29. rem/cli/commands/ask.py +205 -114
  30. rem/cli/commands/db.py +55 -31
  31. rem/cli/commands/experiments.py +1 -1
  32. rem/cli/commands/process.py +179 -43
  33. rem/cli/commands/query.py +109 -0
  34. rem/cli/commands/session.py +117 -0
  35. rem/cli/main.py +2 -0
  36. rem/models/core/experiment.py +1 -1
  37. rem/models/entities/ontology.py +18 -20
  38. rem/models/entities/session.py +1 -0
  39. rem/schemas/agents/core/agent-builder.yaml +1 -1
  40. rem/schemas/agents/rem.yaml +1 -1
  41. rem/schemas/agents/test_orchestrator.yaml +42 -0
  42. rem/schemas/agents/test_structured_output.yaml +52 -0
  43. rem/services/content/providers.py +151 -49
  44. rem/services/content/service.py +18 -5
  45. rem/services/embeddings/worker.py +26 -12
  46. rem/services/postgres/__init__.py +28 -3
  47. rem/services/postgres/diff_service.py +57 -5
  48. rem/services/postgres/programmable_diff_service.py +635 -0
  49. rem/services/postgres/pydantic_to_sqlalchemy.py +2 -2
  50. rem/services/postgres/register_type.py +11 -10
  51. rem/services/postgres/repository.py +39 -28
  52. rem/services/postgres/schema_generator.py +5 -5
  53. rem/services/postgres/sql_builder.py +6 -5
  54. rem/services/rem/README.md +4 -3
  55. rem/services/rem/parser.py +7 -10
  56. rem/services/rem/service.py +47 -0
  57. rem/services/session/__init__.py +8 -1
  58. rem/services/session/compression.py +47 -5
  59. rem/services/session/pydantic_messages.py +310 -0
  60. rem/services/session/reload.py +2 -1
  61. rem/settings.py +92 -7
  62. rem/sql/migrations/001_install.sql +125 -7
  63. rem/sql/migrations/002_install_models.sql +159 -149
  64. rem/sql/migrations/004_cache_system.sql +10 -276
  65. rem/sql/migrations/migrate_session_id_to_uuid.sql +45 -0
  66. rem/utils/schema_loader.py +180 -120
  67. {remdb-0.3.180.dist-info → remdb-0.3.258.dist-info}/METADATA +7 -6
  68. {remdb-0.3.180.dist-info → remdb-0.3.258.dist-info}/RECORD +70 -61
  69. {remdb-0.3.180.dist-info → remdb-0.3.258.dist-info}/WHEEL +0 -0
  70. {remdb-0.3.180.dist-info → remdb-0.3.258.dist-info}/entry_points.txt +0 -0
rem/cli/README.md CHANGED
@@ -434,6 +434,68 @@ Ensure you're using the correct model format:
  - OpenAI: `openai:gpt-4o-mini`, `openai:gpt-4o`
  - Anthropic: `anthropic:claude-sonnet-4-5-20250929`

+ ## Data Visibility: PUBLIC vs PRIVATE
+
+ **IMPORTANT: All ingested data is PUBLIC by default.** This is the correct behavior
+ for shared knowledge bases (ontologies, procedures, reference data).
+
+ ### Why PUBLIC by Default?
+
+ Most data in REM should be searchable by all users:
+ - Clinical ontologies (disorders, symptoms, drugs)
+ - Procedures and protocols (SCID-5, PHQ-9, etc.)
+ - Reference documentation
+ - Shared domain knowledge
+
+ The `rem_lookup()` function searches for data where `user_id IS NULL`, which means
+ public data. If you set `user_id` on data, it becomes invisible to other users.
+
+ ### Ingesting Public Data (Default)
+
+ ```bash
+ # Standard ingestion - data is PUBLIC
+ rem process ingest ontology/procedures/ --table ontologies
+
+ # From S3 - also PUBLIC
+ rem process ingest s3://bucket/docs/reference.pdf
+ ```
+
+ ### Ingesting Private Data (Rare)
+
+ Private data requires the explicit `--make-private` flag:
+
+ ```bash
+ # Private user data - requires --make-private and --user-id
+ rem process ingest personal-notes.md --make-private --user-id user-123
+ ```
+
+ **When to use private data:**
+ - User-uploaded personal documents
+ - Session-specific content
+ - User notes and annotations
+
+ **NEVER use private data for:**
+ - Ontologies and reference material
+ - Clinical procedures and protocols
+ - Shared knowledge bases
+ - Anything that should be searchable by agents
+
+ ### Common Mistake
+
+ If agents can't find data via `search_rem`, the most common cause is that the data
+ was ingested with a `user_id` set. Check with:
+
+ ```sql
+ SELECT name, user_id FROM ontologies WHERE name = 'phq-9-procedure';
+ -- user_id should be NULL for public data
+ ```
+
+ Fix by setting user_id to NULL:
+ ```sql
+ UPDATE ontologies SET user_id = NULL WHERE user_id IS NOT NULL;
+ UPDATE kv_store SET user_id = NULL WHERE entity_type = 'ontologies' AND user_id IS NOT NULL;
+ ```
+
  ## Next Steps

  1. **Implement Schema Registry**
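The visibility rule documented above boils down to a single predicate: rows with `user_id IS NULL` are public, rows with a `user_id` are scoped to that user. A minimal sketch of that predicate in Python; `build_lookup_sql` is a hypothetical helper for illustration, not REM's actual `rem_lookup()` implementation:

```python
# Hypothetical helper illustrating the README's visibility rule;
# REM's real rem_lookup() may differ.
def build_lookup_sql(table: str, user_id: str | None) -> tuple[str, list[str]]:
    """Public rows have user_id IS NULL; private rows match the caller."""
    if user_id is None:
        # Agent/shared lookups see only public data.
        return f"SELECT * FROM {table} WHERE user_id IS NULL", []
    # Authenticated callers see public data plus their own private rows.
    return f"SELECT * FROM {table} WHERE user_id IS NULL OR user_id = $1", [user_id]

sql, params = build_lookup_sql("ontologies", None)
# -> "SELECT * FROM ontologies WHERE user_id IS NULL"
```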
rem/cli/commands/ask.py CHANGED
@@ -71,16 +71,18 @@ async def run_agent_streaming(
      max_turns: int = 10,
      context: AgentContext | None = None,
      max_iterations: int | None = None,
+     user_message: str | None = None,
  ) -> None:
      """
-     Run agent in streaming mode using agent.iter() with usage limits.
+     Run agent in streaming mode using the SAME code path as the API.

-     Design Pattern:
-     - Use agent.iter() for complete execution with tool call visibility
-     - run_stream() stops after first output, missing tool calls
-     - Stream tool call markers: [Calling: tool_name]
-     - Stream text content deltas as they arrive
-     - Show final structured result
+     This uses stream_openai_response_with_save from the API to ensure:
+     1. Tool calls are saved as separate "tool" messages (not embedded in content)
+     2. Assistant response is clean text only (no [Calling: ...] markers)
+     3. CLI testing is equivalent to API testing
+
+     The CLI displays tool calls as [Calling: tool_name] for visibility,
+     but these are NOT saved to the database.

      Args:
          agent: Pydantic AI agent
@@ -88,88 +90,66 @@ async def run_agent_streaming(
          max_turns: Maximum turns for agent execution (not used in current API)
          context: Optional AgentContext for session persistence
          max_iterations: Maximum iterations/requests (from agent schema or settings)
+         user_message: The user's original message (for database storage)
      """
-     from pydantic_ai import UsageLimits
-     from rem.utils.date_utils import to_iso_with_z, utc_now
+     import json
+     from rem.api.routers.chat.streaming import stream_openai_response_with_save, save_user_message

      logger.info("Running agent in streaming mode...")

      try:
-         # Import event types for streaming
-         from pydantic_ai import Agent as PydanticAgent
-         from pydantic_ai.messages import PartStartEvent, PartDeltaEvent, TextPartDelta, ToolCallPart
-
-         # Accumulate assistant response for session persistence
-         assistant_response_parts = []
-
-         # Use agent.iter() to get complete execution with tool calls
-         usage_limits = UsageLimits(request_limit=max_iterations) if max_iterations else None
-         async with agent.iter(prompt, usage_limits=usage_limits) as agent_run:
-             async for node in agent_run:
-                 # Check if this is a model request node (includes tool calls and text)
-                 if PydanticAgent.is_model_request_node(node):
-                     # Stream events from model request
-                     request_stream: Any
-                     async with node.stream(agent_run.ctx) as request_stream:
-                         async for event in request_stream:
-                             # Tool call start event
-                             if isinstance(event, PartStartEvent) and isinstance(
-                                 event.part, ToolCallPart
-                             ):
-                                 tool_marker = f"\n[Calling: {event.part.tool_name}]"
-                                 print(tool_marker, flush=True)
-                                 assistant_response_parts.append(tool_marker)
-
-                             # Text content delta
-                             elif isinstance(event, PartDeltaEvent) and isinstance(
-                                 event.delta, TextPartDelta
-                             ):
-                                 print(event.delta.content_delta, end="", flush=True)
-                                 assistant_response_parts.append(event.delta.content_delta)
-
-         print("\n")  # Final newline after streaming
-
-         # Get final result from agent_run
-         result = agent_run.result
-         if hasattr(result, "output"):
-             logger.info("Final structured result:")
-             output = result.output
-             from rem.agentic.serialization import serialize_agent_result
-             output_json = json.dumps(serialize_agent_result(output), indent=2)
-             print(output_json)
-             assistant_response_parts.append(f"\n{output_json}")
-
-         # Save session messages (if session_id provided and postgres enabled)
-         if context and context.session_id and settings.postgres.enabled:
-             from ...services.session.compression import SessionMessageStore
-
-             # Extract just the user query from prompt
-             # Prompt format from ContextBuilder: system + history + user message
-             # We need to extract the last user message
-             user_message_content = prompt.split("\n\n")[-1] if "\n\n" in prompt else prompt
-
-             user_message = {
-                 "role": "user",
-                 "content": user_message_content,
-                 "timestamp": to_iso_with_z(utc_now()),
-             }
-
-             assistant_message = {
-                 "role": "assistant",
-                 "content": "".join(assistant_response_parts),
-                 "timestamp": to_iso_with_z(utc_now()),
-             }
-
-             # Store messages with compression
-             store = SessionMessageStore(user_id=context.user_id or settings.test.effective_user_id)
-             await store.store_session_messages(
+         # Save user message BEFORE streaming (same as API, using shared utility)
+         if context and context.session_id and user_message:
+             await save_user_message(
                  session_id=context.session_id,
-                 messages=[user_message, assistant_message],
                  user_id=context.user_id,
-                 compress=True,
+                 content=user_message,
              )

-         logger.debug(f"Saved conversation to session {context.session_id}")
+         # Use the API streaming code path for consistency
+         # This properly handles tool calls and message persistence
+         model_name = getattr(agent, 'model', 'unknown')
+         if hasattr(model_name, 'model_name'):
+             model_name = model_name.model_name
+         elif hasattr(model_name, 'name'):
+             model_name = model_name.name
+         else:
+             model_name = str(model_name)
+
+         async for chunk in stream_openai_response_with_save(
+             agent=agent.agent if hasattr(agent, 'agent') else agent,
+             prompt=prompt,
+             model=model_name,
+             session_id=context.session_id if context else None,
+             user_id=context.user_id if context else None,
+             agent_context=context,
+         ):
+             # Parse SSE chunks for CLI display
+             if chunk.startswith("event: tool_call"):
+                 # Extract tool call info from next data line
+                 continue
+             elif chunk.startswith("data: ") and not chunk.startswith("data: [DONE]"):
+                 try:
+                     data_str = chunk[6:].strip()
+                     if data_str:
+                         data = json.loads(data_str)
+                         # Check for tool_call event
+                         if data.get("type") == "tool_call":
+                             tool_name = data.get("tool_name", "tool")
+                             status = data.get("status", "")
+                             if status == "started":
+                                 print(f"\n[Calling: {tool_name}]", flush=True)
+                         # Check for text content (OpenAI format)
+                         elif "choices" in data and data["choices"]:
+                             delta = data["choices"][0].get("delta", {})
+                             content = delta.get("content")
+                             if content:
+                                 print(content, end="", flush=True)
+                 except (json.JSONDecodeError, KeyError, IndexError):
+                     pass
+
+         print("\n")  # Final newline after streaming
+         logger.info("Final structured result:")

      except Exception as e:
          logger.error(f"Agent execution failed: {e}")
@@ -184,9 +164,13 @@ async def run_agent_non_streaming(
      context: AgentContext | None = None,
      plan: bool = False,
      max_iterations: int | None = None,
+     user_message: str | None = None,
  ) -> dict[str, Any] | None:
      """
-     Run agent in non-streaming mode using agent.run() with usage limits.
+     Run agent in non-streaming mode using agent.iter() to capture tool calls.
+
+     This mirrors the streaming code path to ensure tool messages are properly
+     persisted to the database for state tracking across turns.

      Args:
          agent: Pydantic AI agent
@@ -196,77 +180,183 @@ async def run_agent_non_streaming(
          context: Optional AgentContext for session persistence
          plan: If True, output only the generated query (for query-agent)
          max_iterations: Maximum iterations/requests (from agent schema or settings)
+         user_message: The user's original message (for database storage)

      Returns:
          Output data if successful, None otherwise
      """
      from pydantic_ai import UsageLimits
+     from pydantic_ai.agent import Agent
+     from pydantic_ai.messages import (
+         FunctionToolResultEvent,
+         PartStartEvent,
+         PartEndEvent,
+         TextPart,
+         ToolCallPart,
+     )
      from rem.utils.date_utils import to_iso_with_z, utc_now

      logger.info("Running agent in non-streaming mode...")

      try:
-         # Run agent and get complete result with usage limits
-         usage_limits = UsageLimits(request_limit=max_iterations) if max_iterations else None
-         result = await agent.run(prompt, usage_limits=usage_limits)
+         # Track tool calls for persistence (same as streaming code path)
+         tool_calls: list = []
+         pending_tool_data: dict = {}
+         pending_tool_completions: list = []
+         accumulated_content: list = []
+
+         # Get the underlying pydantic-ai agent
+         pydantic_agent = agent.agent if hasattr(agent, 'agent') else agent
+
+         # Use agent.iter() to capture tool calls (same as streaming)
+         async with pydantic_agent.iter(prompt) as agent_run:
+             async for node in agent_run:
+                 # Handle model request nodes (text + tool call starts)
+                 if Agent.is_model_request_node(node):
+                     async with node.stream(agent_run.ctx) as request_stream:
+                         async for event in request_stream:
+                             # Capture text content
+                             if isinstance(event, PartStartEvent) and isinstance(event.part, TextPart):
+                                 if event.part.content:
+                                     accumulated_content.append(event.part.content)
+
+                             # Capture tool call starts
+                             elif isinstance(event, PartStartEvent) and isinstance(event.part, ToolCallPart):
+                                 tool_name = event.part.tool_name
+                                 if tool_name == "final_result":
+                                     continue
+
+                                 import uuid
+                                 tool_id = f"call_{uuid.uuid4().hex[:8]}"
+                                 pending_tool_completions.append((tool_name, tool_id))
+
+                                 # Extract arguments
+                                 args_dict = {}
+                                 if hasattr(event.part, 'args'):
+                                     args = event.part.args
+                                     if isinstance(args, str):
+                                         try:
+                                             args_dict = json.loads(args)
+                                         except json.JSONDecodeError:
+                                             args_dict = {"raw": args}
+                                     elif isinstance(args, dict):
+                                         args_dict = args
+
+                                 pending_tool_data[tool_id] = {
+                                     "tool_name": tool_name,
+                                     "tool_id": tool_id,
+                                     "arguments": args_dict,
+                                 }
+
+                                 # Print tool call for CLI visibility
+                                 print(f"\n[Calling: {tool_name}]", flush=True)
+
+                             # Capture tool call end (update arguments if changed)
+                             elif isinstance(event, PartEndEvent) and isinstance(event.part, ToolCallPart):
+                                 pass  # Arguments already captured at start
+
+                 # Handle tool execution nodes (results)
+                 elif Agent.is_call_tools_node(node):
+                     async with node.stream(agent_run.ctx) as tools_stream:
+                         async for event in tools_stream:
+                             if isinstance(event, FunctionToolResultEvent):
+                                 # Get tool info from pending queue
+                                 if pending_tool_completions:
+                                     tool_name, tool_id = pending_tool_completions.pop(0)
+                                 else:
+                                     import uuid
+                                     tool_name = "tool"
+                                     tool_id = f"call_{uuid.uuid4().hex[:8]}"
+
+                                 result_content = event.result.content if hasattr(event.result, 'content') else event.result
+
+                                 # Capture tool call for persistence
+                                 if tool_id in pending_tool_data:
+                                     tool_data = pending_tool_data[tool_id]
+                                     tool_data["result"] = result_content
+                                     tool_calls.append(tool_data)
+                                     del pending_tool_data[tool_id]
+
+         # Get final result
+         result = agent_run.result

          # Extract output data
          output_data = None
          assistant_content = None
-         if hasattr(result, "output"):
+         if result is not None and hasattr(result, "output"):
              output = result.output
              from rem.agentic.serialization import serialize_agent_result
              output_data = serialize_agent_result(output)

              if plan and isinstance(output_data, dict) and "query" in output_data:
-                 # Plan mode: Output only the query
-                 # Use sql formatting if possible or just raw string
                  assistant_content = output_data["query"]
                  print(assistant_content)
              else:
-                 # Normal mode
-                 assistant_content = json.dumps(output_data, indent=2)
+                 # For string output, use it directly
+                 if isinstance(output_data, str):
+                     assistant_content = output_data
+                 else:
+                     assistant_content = json.dumps(output_data, indent=2)
                  print(assistant_content)
          else:
-             # Fallback for text-only results
-             assistant_content = str(result)
-             print(assistant_content)
+             assistant_content = str(result) if result else ""
+             if assistant_content:
+                 print(assistant_content)

          # Save to file if requested
          if output_file and output_data:
              await _save_output_file(output_file, output_data)

-         # Save session messages (if session_id provided and postgres enabled)
+         # Save session messages including tool calls (same as streaming code path)
          if context and context.session_id and settings.postgres.enabled:
              from ...services.session.compression import SessionMessageStore

-             # Extract just the user query from prompt
-             # Prompt format from ContextBuilder: system + history + user message
-             # We need to extract the last user message
-             user_message_content = prompt.split("\n\n")[-1] if "\n\n" in prompt else prompt
+             timestamp = to_iso_with_z(utc_now())
+             messages_to_store = []

-             user_message = {
+             # Save user message first
+             user_message_content = user_message or (prompt.split("\n\n")[-1] if "\n\n" in prompt else prompt)
+             messages_to_store.append({
                  "role": "user",
                  "content": user_message_content,
-                 "timestamp": to_iso_with_z(utc_now()),
-             }
-
-             assistant_message = {
-                 "role": "assistant",
-                 "content": assistant_content,
-                 "timestamp": to_iso_with_z(utc_now()),
-             }
-
-             # Store messages with compression
+                 "timestamp": timestamp,
+             })
+
+             # Save tool call messages (message_type: "tool") - CRITICAL for state tracking
+             for tool_call in tool_calls:
+                 if not tool_call:
+                     continue
+                 tool_message = {
+                     "role": "tool",
+                     "content": json.dumps(tool_call.get("result", {}), default=str),
+                     "timestamp": timestamp,
+                     "tool_call_id": tool_call.get("tool_id"),
+                     "tool_name": tool_call.get("tool_name"),
+                     "tool_arguments": tool_call.get("arguments"),
+                 }
+                 messages_to_store.append(tool_message)
+
+             # Save assistant message
+             if assistant_content:
+                 messages_to_store.append({
+                     "role": "assistant",
+                     "content": assistant_content,
+                     "timestamp": timestamp,
+                 })
+
+             # Store all messages
              store = SessionMessageStore(user_id=context.user_id or settings.test.effective_user_id)
              await store.store_session_messages(
                  session_id=context.session_id,
-                 messages=[user_message, assistant_message],
+                 messages=messages_to_store,
                  user_id=context.user_id,
-                 compress=True,
+                 compress=False,  # Store uncompressed; compression happens on reload
              )

-             logger.debug(f"Saved conversation to session {context.session_id}")
+             logger.debug(
+                 f"Saved {len(tool_calls)} tool calls + user/assistant messages "
+                 f"to session {context.session_id}"
+             )

          return output_data

@@ -352,8 +442,8 @@ async def _save_output_file(file_path: Path, data: dict[str, Any]) -> None:
  )
  @click.option(
      "--stream/--no-stream",
-     default=False,
-     help="Enable streaming mode (default: disabled)",
+     default=True,
+     help="Enable streaming mode (default: enabled)",
  )
  @click.option(
      "--user-id",
@@ -549,7 +639,7 @@ async def _ask_async(

      # Run agent with session persistence
      if stream:
-         await run_agent_streaming(agent, prompt, max_turns=max_turns, context=context)
+         await run_agent_streaming(agent, prompt, max_turns=max_turns, context=context, user_message=query)
      else:
          await run_agent_non_streaming(
              agent,
@@ -558,6 +648,7 @@ async def _ask_async(
              output_file=output_file,
              context=context,
              plan=plan,
+             user_message=query,
          )

      # Log session ID for reuse
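Both code paths above now consume the same OpenAI-style SSE chunks the API emits. As a standalone illustration of the parsing the new CLI loop performs, here is a minimal sketch; the sample chunk strings are fabricated, and only the payload shapes handled in the diff are assumed:

```python
import json

def render_chunk(chunk: str) -> None:
    """Render one SSE chunk the way the CLI streaming loop does:
    tool_call events become [Calling: ...] markers, OpenAI deltas print as text."""
    if not chunk.startswith("data: ") or chunk.startswith("data: [DONE]"):
        return
    try:
        data = json.loads(chunk[6:].strip() or "{}")
    except json.JSONDecodeError:
        return
    if data.get("type") == "tool_call" and data.get("status") == "started":
        print(f"\n[Calling: {data.get('tool_name', 'tool')}]", flush=True)
    elif data.get("choices"):
        content = data["choices"][0].get("delta", {}).get("content")
        if content:
            print(content, end="", flush=True)

# Fabricated example chunks:
render_chunk('data: {"type": "tool_call", "tool_name": "search_rem", "status": "started"}')
render_chunk('data: {"choices": [{"delta": {"content": "Hello"}}]}')
render_chunk("data: [DONE]")  # terminator is ignored
```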
rem/cli/commands/db.py CHANGED
@@ -375,8 +375,10 @@ async def _load_async(file_path: Path, table: str | None, user_id: str | None, d
      import polars as pl
      import yaml
      from ...models.core.inline_edge import InlineEdge
-     from ...models.entities import Resource, Moment, User, Message, SharedSession, Schema
+     from ...models.entities import SharedSession
      from ...services.postgres import get_postgres_service
+     from ...utils.model_helpers import get_table_name
+     from ... import get_model_registry

      logger.info(f"Loading data from: {file_path}")
      scope_msg = f"user: {user_id}" if user_id else "public"
@@ -385,13 +387,12 @@ async def _load_async(file_path: Path, table: str | None, user_id: str | None, d
      suffix = file_path.suffix.lower()
      is_yaml = suffix in {".yaml", ".yml"}

-     # Map table names to model classes
+     # Build MODEL_MAP dynamically from registry
+     registry = get_model_registry()
+     registry.register_core_models()
      MODEL_MAP = {
-         "users": User,
-         "moments": Moment,
-         "resources": Resource,
-         "messages": Message,
-         "schemas": Schema,
+         get_table_name(model): model
+         for model in registry.get_model_classes().values()
      }

      # Non-CoreModel tables that need direct SQL insertion
@@ -432,12 +433,9 @@ async def _load_async(file_path: Path, table: str | None, user_id: str | None, d
          logger.info(f"Columns: {list(df.columns)}")

          # Validate first row against model if table is known
-         if table in {"users", "moments", "resources", "messages", "schemas"} and rows:
-             from ...models.entities import Resource, Moment, User, Message, Schema
+         if table in MODEL_MAP and rows:
              from ...utils.model_helpers import validate_data_for_model
-             model_map = {"users": User, "moments": Moment, "resources": Resource,
-                          "messages": Message, "schemas": Schema}
-             result = validate_data_for_model(model_map[table], rows[0])
+             result = validate_data_for_model(MODEL_MAP[table], rows[0])
              if result.extra_fields:
                  logger.warning(f"Unknown fields (ignored): {result.extra_fields}")
              if result.valid:
@@ -457,6 +455,10 @@ async def _load_async(file_path: Path, table: str | None, user_id: str | None, d

      await pg.connect()

+     # Start embedding worker for generating embeddings
+     if pg.embedding_worker:
+         await pg.embedding_worker.start()
+
      try:
          total_loaded = 0

@@ -467,8 +469,7 @@ async def _load_async(file_path: Path, table: str | None, user_id: str | None, d
              # Handle direct insert tables (non-CoreModel)
              if table_name in DIRECT_INSERT_TABLES:
                  for row_data in rows:
-                     if "tenant_id" not in row_data:
-                         row_data["tenant_id"] = "default"
+                     # tenant_id is optional - NULL means public/shared

                      if table_name == "shared_sessions":
                          await pg.fetch(
@@ -479,7 +480,7 @@ async def _load_async(file_path: Path, table: str | None, user_id: str | None, d
                              row_data["session_id"],
                              row_data["owner_user_id"],
                              row_data["shared_with_user_id"],
-                             row_data["tenant_id"],
+                             row_data.get("tenant_id"),  # Optional - NULL means public
                          )
                          total_loaded += 1
                          logger.success(f"Loaded shared_session: {row_data['owner_user_id']} -> {row_data['shared_with_user_id']}")
@@ -492,10 +493,8 @@ async def _load_async(file_path: Path, table: str | None, user_id: str | None, d
              model_class = MODEL_MAP[table_name]

              for row_idx, row_data in enumerate(rows):
-                 if "user_id" not in row_data and user_id is not None:
-                     row_data["user_id"] = user_id
-                 if "tenant_id" not in row_data and user_id is not None:
-                     row_data["tenant_id"] = row_data.get("user_id", user_id)
+                 # tenant_id and user_id are optional - NULL means public/shared data
+                 # Data files can explicitly set tenant_id/user_id if needed

                  # Convert graph_edges to InlineEdge format if present
                  if "graph_edges" in row_data:
@@ -530,6 +529,14 @@ async def _load_async(file_path: Path, table: str | None, user_id: str | None, d

          logger.success(f"Data loaded successfully! Total rows: {total_loaded}")

+         # Wait for embeddings to complete
+         if pg.embedding_worker and pg.embedding_worker.running:
+             queue_size = pg.embedding_worker.task_queue.qsize()
+             if queue_size > 0:
+                 logger.info(f"Waiting for {queue_size} embeddings to complete...")
+             await pg.embedding_worker.stop()
+             logger.success("Embeddings generated successfully")
+
      finally:
          await pg.disconnect()

@@ -634,7 +641,7 @@

      if not result.has_changes:
          click.secho("✓ No schema drift detected", fg="green")
-         click.echo("  Database matches Pydantic models")
+         click.echo("  Database matches source (tables, functions, triggers, views)")
          if result.filtered_count > 0:
              click.echo()
              click.secho(f"  ({result.filtered_count} destructive change(s) hidden by '{strategy}' strategy)", fg="yellow")
@@ -646,17 +653,34 @@
          if result.filtered_count > 0:
              click.secho(f"  ({result.filtered_count} destructive change(s) hidden by '{strategy}' strategy)", fg="yellow")
          click.echo()
-         click.echo("Changes:")
-         for line in result.summary:
-             if line.startswith("+"):
-                 click.secho(f"  {line}", fg="green")
-             elif line.startswith("-"):
-                 click.secho(f"  {line}", fg="red")
-             elif line.startswith("~"):
-                 click.secho(f"  {line}", fg="yellow")
-             else:
-                 click.echo(f"  {line}")
-         click.echo()
+
+         # Table/column changes (Alembic)
+         if result.summary:
+             click.echo("Table Changes:")
+             for line in result.summary:
+                 if line.startswith("+"):
+                     click.secho(f"  {line}", fg="green")
+                 elif line.startswith("-"):
+                     click.secho(f"  {line}", fg="red")
+                 elif line.startswith("~"):
+                     click.secho(f"  {line}", fg="yellow")
+                 else:
+                     click.echo(f"  {line}")
+             click.echo()
+
+         # Programmable object changes (functions, triggers, views)
+         if result.programmable_summary:
+             click.echo("Programmable Objects (functions/triggers/views):")
+             for line in result.programmable_summary:
+                 if line.startswith("+"):
+                     click.secho(f"  {line}", fg="green")
+                 elif line.startswith("-"):
+                     click.secho(f"  {line}", fg="red")
+                 elif line.startswith("~"):
+                     click.secho(f"  {line}", fg="yellow")
+                 else:
+                     click.echo(f"  {line}")
+             click.echo()

          # Generate migration if requested
          if generate:
@@ -1568,7 +1568,7 @@ def export(
          rem experiments export my-experiment

          # Export to specific bucket
-         rem experiments export my-experiment --bucket siggy-data
+         rem experiments export my-experiment --bucket my-data-lake

          # Include results in export
          rem experiments export my-experiment --include-results
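The MODEL_MAP change in `_load_async` replaces a hand-maintained table-to-model dict with one derived from the model registry, so newly registered entities become loadable without touching the CLI. A minimal sketch of the pattern; the registry and `get_table_name` below are simplified stand-ins, not REM's actual helpers:

```python
# Simplified stand-ins for REM's model registry and get_table_name helper.
class User:
    """Stand-in model class."""

class Resource:
    """Stand-in model class."""

REGISTRY: dict[str, type] = {"User": User, "Resource": Resource}

def get_table_name(model: type) -> str:
    # Naive lower-case pluralization; the real helper may differ.
    return model.__name__.lower() + "s"

# Built once at load time; new tables appear as models are registered.
MODEL_MAP = {get_table_name(model): model for model in REGISTRY.values()}
assert MODEL_MAP["users"] is User
```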