remdb 0.3.114-py3-none-any.whl → 0.3.127-py3-none-any.whl

This diff shows the changes between publicly released versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of remdb might be problematic.

Files changed (41)
  1. rem/agentic/agents/sse_simulator.py +2 -0
  2. rem/agentic/context.py +23 -3
  3. rem/agentic/mcp/tool_wrapper.py +29 -3
  4. rem/agentic/otel/setup.py +1 -0
  5. rem/agentic/providers/pydantic_ai.py +26 -2
  6. rem/api/main.py +4 -1
  7. rem/api/mcp_router/server.py +9 -3
  8. rem/api/mcp_router/tools.py +324 -2
  9. rem/api/routers/admin.py +218 -1
  10. rem/api/routers/chat/completions.py +250 -4
  11. rem/api/routers/chat/models.py +81 -7
  12. rem/api/routers/chat/otel_utils.py +33 -0
  13. rem/api/routers/chat/sse_events.py +17 -1
  14. rem/api/routers/chat/streaming.py +35 -1
  15. rem/api/routers/feedback.py +134 -14
  16. rem/api/routers/query.py +6 -3
  17. rem/cli/commands/README.md +42 -0
  18. rem/cli/commands/cluster.py +617 -168
  19. rem/cli/commands/configure.py +1 -3
  20. rem/cli/commands/db.py +66 -22
  21. rem/cli/commands/experiments.py +242 -26
  22. rem/cli/commands/schema.py +6 -5
  23. rem/config.py +8 -1
  24. rem/services/phoenix/client.py +59 -18
  25. rem/services/postgres/diff_service.py +108 -3
  26. rem/services/postgres/schema_generator.py +205 -4
  27. rem/services/session/compression.py +7 -0
  28. rem/settings.py +150 -18
  29. rem/sql/migrations/001_install.sql +156 -0
  30. rem/sql/migrations/002_install_models.sql +1864 -1
  31. rem/sql/migrations/004_cache_system.sql +548 -0
  32. rem/utils/__init__.py +18 -0
  33. rem/utils/schema_loader.py +94 -3
  34. rem/utils/sql_paths.py +146 -0
  35. rem/workers/__init__.py +3 -1
  36. rem/workers/db_listener.py +579 -0
  37. rem/workers/unlogged_maintainer.py +463 -0
  38. {remdb-0.3.114.dist-info → remdb-0.3.127.dist-info}/METADATA +213 -177
  39. {remdb-0.3.114.dist-info → remdb-0.3.127.dist-info}/RECORD +41 -36
  40. {remdb-0.3.114.dist-info → remdb-0.3.127.dist-info}/WHEEL +0 -0
  41. {remdb-0.3.114.dist-info → remdb-0.3.127.dist-info}/entry_points.txt +0 -0
rem/services/phoenix/client.py

@@ -793,40 +793,72 @@ class PhoenixClient:
         score: float | None = None,
         explanation: str | None = None,
         metadata: dict[str, Any] | None = None,
+        trace_id: str | None = None,
     ) -> str | None:
-        """Add feedback annotation to a span.
+        """Add feedback annotation to a span via Phoenix REST API.
+
+        Uses direct HTTP POST to /v1/span_annotations for reliability
+        (Phoenix Python client API changes frequently).
 
         Args:
-            span_id: Span ID to annotate
+            span_id: Span ID to annotate (hex string)
             annotation_name: Name of the annotation (e.g., "correctness", "user_feedback")
             annotator_kind: Type of annotator ("HUMAN", "LLM", "CODE")
             label: Optional label (e.g., "correct", "incorrect", "helpful")
             score: Optional numeric score (0.0-1.0)
             explanation: Optional explanation text
             metadata: Optional additional metadata dict
+            trace_id: Optional trace ID (used if span lookup needed)
 
         Returns:
            Annotation ID if successful, None otherwise
        """
+        import httpx
+
        try:
-            result = self._client.add_span_annotation(  # type: ignore[attr-defined]
-                span_id=span_id,
-                name=annotation_name,
-                annotator_kind=annotator_kind,
-                label=label,
-                score=score,
-                explanation=explanation,
-                metadata=metadata,
-            )
+            # Build annotation payload for Phoenix REST API
+            annotation_data = {
+                "span_id": span_id,
+                "name": annotation_name,
+                "annotator_kind": annotator_kind,
+                "result": {
+                    "label": label,
+                    "score": score,
+                    "explanation": explanation,
+                },
+                "metadata": metadata or {},
+            }
 
-            annotation_id = getattr(result, "id", None) if result else None
-            logger.info(f"Added {annotator_kind} feedback to span {span_id} -> {annotation_id}")
+            # Add trace_id if provided
+            if trace_id:
+                annotation_data["trace_id"] = trace_id
+
+            # POST to Phoenix REST API
+            annotations_endpoint = f"{self.config.base_url}/v1/span_annotations"
+            headers = {}
+            if self.config.api_key:
+                headers["Authorization"] = f"Bearer {self.config.api_key}"
+
+            with httpx.Client(timeout=5.0) as client:
+                response = client.post(
+                    annotations_endpoint,
+                    json={"data": [annotation_data]},
+                    headers=headers,
+                )
+                response.raise_for_status()
 
-            return annotation_id
+            logger.info(f"Added {annotator_kind} feedback to span {span_id}")
+            return span_id  # Return span_id as annotation reference
 
+        except httpx.HTTPStatusError as e:
+            logger.error(
+                f"Failed to add span feedback (HTTP {e.response.status_code}): "
+                f"{e.response.text if hasattr(e, 'response') else 'N/A'}"
+            )
+            return None
         except Exception as e:
             logger.error(f"Failed to add span feedback: {e}")
-            raise
+            return None
 
     def sync_user_feedback(
         self,
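For reference, the request body the new code sends to Phoenix has the following shape (a sketch with illustrative values; the field names are taken from the diff above):

```python
# Illustrative payload for POST {base_url}/v1/span_annotations; values are made up.
payload = {
    "data": [
        {
            "span_id": "a1b2c3d4e5f60718",  # hex span ID
            "name": "user_feedback",
            "annotator_kind": "HUMAN",
            "result": {
                "label": "helpful",
                "score": 1.0,
                "explanation": "Answered the question directly",
            },
            "metadata": {"rating": 1, "categories": ["helpful"]},
            "trace_id": "0123456789abcdef0123456789abcdef",  # only when provided
        }
    ]
}
```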
@@ -835,6 +867,7 @@ class PhoenixClient:
         categories: list[str] | None = None,
         comment: str | None = None,
         feedback_id: str | None = None,
+        trace_id: str | None = None,
     ) -> str | None:
         """Sync user feedback to Phoenix as a span annotation.
 
@@ -847,6 +880,7 @@ class PhoenixClient:
             categories: List of feedback categories
             comment: Free-text comment
             feedback_id: Optional REM feedback ID for reference
+            trace_id: Optional trace ID for the span
 
         Returns:
             Phoenix annotation ID if successful
@@ -860,12 +894,18 @@ class PhoenixClient:
         ... )
         """
         # Convert rating to 0-1 score
+        # Rating scheme:
+        #   -1 = thumbs down → score 0.0
+        #    1 = thumbs up → score 1.0
+        #  2-5 = star rating → normalized to 0-1 range
         score = None
         if rating is not None:
             if rating == -1:
                 score = 0.0
-            elif 1 <= rating <= 5:
-                score = rating / 5.0
+            elif rating == 1:
+                score = 1.0  # Thumbs up
+            elif 2 <= rating <= 5:
+                score = (rating - 1) / 4.0  # 2→0.25, 3→0.5, 4→0.75, 5→1.0
 
         # Use primary category as label
         label = categories[0] if categories else None
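The new mapping is worth spelling out, since it changes the meaning of rating 1 (previously 1/5 = 0.2, now an explicit thumbs-up). A standalone sketch of the same logic, with the function name invented for illustration:

```python
def rating_to_score(rating: int | None) -> float | None:
    """Map REM feedback ratings to Phoenix scores in [0, 1] (mirrors the diff)."""
    if rating is None:
        return None
    if rating == -1:              # thumbs down
        return 0.0
    if rating == 1:               # thumbs up
        return 1.0
    if 2 <= rating <= 5:          # star ratings, normalized
        return (rating - 1) / 4.0
    return None                   # out-of-range ratings yield no score

assert rating_to_score(-1) == 0.0
assert rating_to_score(1) == 1.0
assert [rating_to_score(r) for r in (2, 3, 4, 5)] == [0.25, 0.5, 0.75, 1.0]
```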
@@ -880,7 +920,7 @@ class PhoenixClient:
             explanation = f"Categories: {cats_str}"
 
         # Build metadata
-        metadata = {
+        metadata: dict[str, Any] = {
             "rating": rating,
             "categories": categories or [],
         }
@@ -895,6 +935,7 @@ class PhoenixClient:
             score=score,
             explanation=explanation,
             metadata=metadata,
+            trace_id=trace_id,
         )
 
     def get_span_annotations(
rem/services/postgres/diff_service.py

@@ -49,6 +49,7 @@ class SchemaDiff:
     summary: list[str] = field(default_factory=list)
     sql: str = ""
     upgrade_ops: Optional[ops.UpgradeOps] = None
+    filtered_count: int = 0  # Number of operations filtered out by strategy
 
     @property
     def change_count(self) -> int:
@@ -61,17 +62,24 @@ class DiffService:
     Service for comparing Pydantic models against database schema.
 
     Uses Alembic's autogenerate machinery without creating revision files.
+
+    Strategies:
+        additive: Only ADD operations (columns, tables, indexes). No drops. Safe for production.
+        full: All operations including DROPs. Use with caution.
+        safe: Additive + safe column type changes (widenings like VARCHAR(50) -> VARCHAR(256)).
     """
 
-    def __init__(self, models_dir: Optional[Path] = None):
+    def __init__(self, models_dir: Optional[Path] = None, strategy: str = "additive"):
         """
         Initialize diff service.
 
         Args:
             models_dir: Directory containing Pydantic models.
                 If None, uses default rem/models/entities location.
+            strategy: Migration strategy - 'additive' (default), 'full', or 'safe'
         """
         self.models_dir = models_dir
+        self.strategy = strategy
         self._metadata = None
 
     def get_connection_url(self) -> str:
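Only `__init__` changes in this hunk, so a minimal construction sketch (the compare entry point is not shown in this diff):

```python
# A minimal sketch of choosing a strategy; assumes only the __init__ shown above.
from rem.services.postgres.diff_service import DiffService

additive = DiffService()                 # default: no destructive operations
full = DiffService(strategy="full")      # includes DROPs; use with caution
safe = DiffService(strategy="safe")      # additive plus safe type widenings
```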
@@ -130,6 +138,7 @@ class DiffService:
         metadata = self.get_target_metadata()
 
         summary = []
+        filtered_count = 0
 
         with engine.connect() as conn:
             # Create migration context for comparison
@@ -148,9 +157,13 @@ class DiffService:
             migration_script = produce_migrations(context, metadata)
             upgrade_ops = migration_script.upgrade_ops
 
-            # Process detected operations
+            # Filter operations based on strategy
             if upgrade_ops and upgrade_ops.ops:
-                for op in upgrade_ops.ops:
+                filtered_ops, filtered_count = self._filter_operations(upgrade_ops.ops)
+                upgrade_ops.ops = filtered_ops
+
+                # Process filtered operations
+                for op in filtered_ops:
                     summary.extend(self._describe_operation(op))
 
         has_changes = len(summary) > 0
@@ -165,8 +178,100 @@ class DiffService:
             summary=summary,
             sql=sql,
             upgrade_ops=upgrade_ops,
+            filtered_count=filtered_count,
         )
 
+    def _filter_operations(self, operations: list) -> tuple[list, int]:
+        """
+        Filter operations based on migration strategy.
+
+        Args:
+            operations: List of Alembic operations
+
+        Returns:
+            Tuple of (filtered_operations, count_of_filtered_out)
+        """
+        if self.strategy == "full":
+            # Full strategy: include everything
+            return operations, 0
+
+        filtered = []
+        filtered_count = 0
+
+        for op in operations:
+            if isinstance(op, ops.ModifyTableOps):
+                # Filter sub-operations within table
+                sub_filtered, sub_count = self._filter_operations(op.ops)
+                filtered_count += sub_count
+                if sub_filtered:
+                    op.ops = sub_filtered
+                    filtered.append(op)
+            elif self._is_allowed_operation(op):
+                filtered.append(op)
+            else:
+                filtered_count += 1
+
+        return filtered, filtered_count
+
+    def _is_allowed_operation(self, op: ops.MigrateOperation) -> bool:
+        """
+        Check if an operation is allowed by the current strategy.
+
+        Args:
+            op: Alembic operation
+
+        Returns:
+            True if operation is allowed, False if it should be filtered out
+        """
+        # Additive operations (allowed in all strategies)
+        if isinstance(op, (ops.CreateTableOp, ops.AddColumnOp, ops.CreateIndexOp, ops.CreateForeignKeyOp)):
+            return True
+
+        # Destructive operations (only allowed in 'full' strategy)
+        if isinstance(op, (ops.DropTableOp, ops.DropColumnOp, ops.DropIndexOp, ops.DropConstraintOp)):
+            return self.strategy == "full"
+
+        # Alter operations
+        if isinstance(op, ops.AlterColumnOp):
+            if self.strategy == "full":
+                return True
+            if self.strategy == "safe":
+                # Allow safe type changes (widenings)
+                return self._is_safe_type_change(op)
+            # additive: no alter operations
+            return False
+
+        # Unknown operations: allow in full, deny otherwise
+        return self.strategy == "full"
+
+    def _is_safe_type_change(self, op: ops.AlterColumnOp) -> bool:
+        """
+        Check if a column type change is safe (widening, not narrowing).
+
+        Safe changes:
+        - VARCHAR(n) -> VARCHAR(m) where m > n
+        - INTEGER -> BIGINT
+        - Adding nullable (NOT NULL -> NULL)
+
+        Args:
+            op: AlterColumnOp to check
+
+        Returns:
+            True if the change is safe
+        """
+        # Allowing nullable is always safe
+        if op.modify_nullable is True:
+            return True
+
+        # Type changes: only allow VARCHAR widenings for now
+        if op.modify_type is not None:
+            new_type = str(op.modify_type).upper()
+            # VARCHAR widenings are generally safe
+            if "VARCHAR" in new_type:
+                return True  # Assume widening; could add length comparison
+
+        return False
+
     def _describe_operation(self, op: ops.MigrateOperation, prefix: str = "") -> list[str]:
         """Convert Alembic operation to human-readable description."""
         descriptions = []
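The filtering helpers can be exercised directly against hand-built Alembic operations. A sketch (the `DiffService` methods come from the diff above; the example table, column, and the direct use of the private method are illustrative):

```python
import sqlalchemy as sa
from alembic.operations import ops

from rem.services.postgres.diff_service import DiffService

service = DiffService(strategy="additive")

operations = [
    ops.AddColumnOp("users", sa.Column("nickname", sa.String(64))),  # additive: kept
    ops.DropColumnOp("users", "legacy_flag"),                        # destructive: filtered out
]

kept, dropped = service._filter_operations(operations)
assert len(kept) == 1 and dropped == 1

# Under the "full" strategy nothing is filtered:
assert DiffService(strategy="full")._filter_operations(operations) == (operations, 0)
```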
rem/services/postgres/schema_generator.py

@@ -12,6 +12,7 @@ Output includes:
 - KV_STORE triggers
 - Indexes (foreground and background)
 - Migrations
+- Schema table entries (for agent-like table access)
 
 Usage:
     from rem.services.postgres.schema_generator import SchemaGenerator
@@ -30,14 +31,192 @@ Usage:
 
 import importlib.util
 import inspect
+import json
+import uuid
 from pathlib import Path
-from typing import Type
+from typing import Any, Type
 
 from loguru import logger
 from pydantic import BaseModel
 
 from ...settings import settings
-from .register_type import register_type
+from ...utils.sql_paths import get_package_sql_dir
+from .register_type import register_type, should_embed_field
+
+# Namespace UUID for generating deterministic UUIDs from model names
+# Using UUID5 with this namespace ensures same model always gets same UUID
+REM_SCHEMA_NAMESPACE = uuid.UUID("6ba7b810-9dad-11d1-80b4-00c04fd430c8")  # DNS namespace
+
+
+def generate_model_uuid(fully_qualified_name: str) -> uuid.UUID:
+    """
+    Generate deterministic UUID from fully qualified model name.
+
+    Uses UUID5 (SHA-1 hash) with REM namespace for reproducibility.
+    Same fully qualified name always produces same UUID.
+
+    Args:
+        fully_qualified_name: Full module path, e.g., "rem.models.entities.Resource"
+
+    Returns:
+        Deterministic UUID for this model
+    """
+    return uuid.uuid5(REM_SCHEMA_NAMESPACE, fully_qualified_name)
+
+
+def extract_model_schema_metadata(
+    model: Type[BaseModel],
+    table_name: str,
+    entity_key_field: str,
+    include_search_tool: bool = True,
+) -> dict[str, Any]:
+    """
+    Extract schema metadata from a Pydantic model for schemas table.
+
+    Args:
+        model: Pydantic model class
+        table_name: Database table name
+        entity_key_field: Field used as entity key in kv_store
+        include_search_tool: If True, add search_rem tool for querying this table
+
+    Returns:
+        Dict with schema metadata ready for schemas table insert
+    """
+    # Get fully qualified name
+    fqn = f"{model.__module__}.{model.__name__}"
+
+    # Generate deterministic UUID
+    schema_id = generate_model_uuid(fqn)
+
+    # Get JSON schema from Pydantic
+    json_schema = model.model_json_schema()
+
+    # Find embedding fields
+    embedding_fields = []
+    for field_name, field_info in model.model_fields.items():
+        if should_embed_field(field_name, field_info):
+            embedding_fields.append(field_name)
+
+    # Build description with search capability note
+    base_description = model.__doc__ or f"Schema for {model.__name__}"
+    search_note = (
+        f"\n\nThis agent can search the `{table_name}` table using the `search_rem` tool. "
+        f"Use REM query syntax: LOOKUP for exact match, FUZZY for typo-tolerant search, "
+        f"SEARCH for semantic similarity, or SQL for complex queries."
+    ) if include_search_tool else ""
+
+    # Build spec with table metadata and tools
+    # Note: default_search_table is used by create_agent to append a description
+    # suffix to the search_rem tool when loading it dynamically
+    has_embeddings = bool(embedding_fields)
+
+    spec = {
+        "type": "object",
+        "description": base_description + search_note,
+        "properties": json_schema.get("properties", {}),
+        "required": json_schema.get("required", []),
+        "json_schema_extra": {
+            "table_name": table_name,
+            "entity_key_field": entity_key_field,
+            "embedding_fields": embedding_fields,
+            "fully_qualified_name": fqn,
+            "tools": ["search_rem"] if include_search_tool else [],
+            "default_search_table": table_name,
+            "has_embeddings": has_embeddings,
+        },
+    }
+
+    # Build content (documentation)
+    content = f"""# {model.__name__}
+
+{base_description}
+
+## Overview
+
+The `{model.__name__}` entity is stored in the `{table_name}` table. Each record is uniquely
+identified by its `{entity_key_field}` field for lookups and graph traversal.
+
+## Search Capabilities
+
+This schema includes the `search_rem` tool which supports:
+- **LOOKUP**: O(1) exact match by {entity_key_field} (e.g., `LOOKUP "entity-name"`)
+- **FUZZY**: Typo-tolerant search (e.g., `FUZZY "partial" THRESHOLD 0.3`)
+- **SEARCH**: Semantic vector search on {', '.join(embedding_fields) if embedding_fields else 'content'} (e.g., `SEARCH "concept" FROM {table_name} LIMIT 10`)
+- **SQL**: Complex queries (e.g., `SELECT * FROM {table_name} WHERE ...`)
+
+## Table Info
+
+| Property | Value |
+|----------|-------|
+| Table | `{table_name}` |
+| Entity Key | `{entity_key_field}` |
+| Embedding Fields | {', '.join(f'`{f}`' for f in embedding_fields) if embedding_fields else 'None'} |
+| Tools | {', '.join(['`search_rem`'] if include_search_tool else ['None'])} |
+
+## Fields
+
+"""
+    for field_name, field_info in model.model_fields.items():
+        field_type = str(field_info.annotation) if field_info.annotation else "Any"
+        field_desc = field_info.description or ""
+        required = "Required" if field_info.is_required() else "Optional"
+        content += f"### `{field_name}`\n"
+        content += f"- **Type**: `{field_type}`\n"
+        content += f"- **{required}**\n"
+        if field_desc:
+            content += f"- {field_desc}\n"
+        content += "\n"
+
+    return {
+        "id": str(schema_id),
+        "name": model.__name__,
+        "table_name": table_name,
+        "entity_key_field": entity_key_field,
+        "embedding_fields": embedding_fields,
+        "fqn": fqn,
+        "spec": spec,
+        "content": content,
+        "category": "entity",
+    }
+
+
+def generate_schema_upsert_sql(schema_metadata: dict[str, Any]) -> str:
+    """
+    Generate SQL UPSERT statement for schemas table.
+
+    Uses ON CONFLICT DO UPDATE for idempotency.
+
+    Args:
+        schema_metadata: Dict from extract_model_schema_metadata()
+
+    Returns:
+        SQL INSERT ... ON CONFLICT statement
+    """
+    # Escape single quotes in content and spec
+    content_escaped = schema_metadata["content"].replace("'", "''")
+    spec_json = json.dumps(schema_metadata["spec"]).replace("'", "''")
+
+    sql = f"""
+-- Schema entry for {schema_metadata['name']} ({schema_metadata['table_name']})
+INSERT INTO schemas (id, tenant_id, name, content, spec, category, metadata)
+VALUES (
+    '{schema_metadata['id']}'::uuid,
+    'system',
+    '{schema_metadata['name']}',
+    '{content_escaped}',
+    '{spec_json}'::jsonb,
+    'entity',
+    '{{"table_name": "{schema_metadata['table_name']}", "entity_key_field": "{schema_metadata['entity_key_field']}", "embedding_fields": {json.dumps(schema_metadata['embedding_fields'])}, "fqn": "{schema_metadata['fqn']}"}}'::jsonb
+)
+ON CONFLICT (id) DO UPDATE SET
+    name = EXCLUDED.name,
+    content = EXCLUDED.content,
+    spec = EXCLUDED.spec,
+    category = EXCLUDED.category,
+    metadata = EXCLUDED.metadata,
+    updated_at = CURRENT_TIMESTAMP;
+"""
+    return sql.strip()
 
 
 class SchemaGenerator:
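The deterministic-ID scheme is plain stdlib UUID5, so regenerating the install SQL always upserts the same schemas row per model:

```python
import uuid

# Same constant as REM_SCHEMA_NAMESPACE above (the RFC 4122 DNS namespace).
ns = uuid.UUID("6ba7b810-9dad-11d1-80b4-00c04fd430c8")

a = uuid.uuid5(ns, "rem.models.entities.Resource")
b = uuid.uuid5(ns, "rem.models.entities.Resource")
assert a == b  # same fully qualified name, same UUID on every run
assert a != uuid.uuid5(ns, "rem.models.entities.Message")  # different model, different row
```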
@@ -56,9 +235,9 @@ class SchemaGenerator:
         Initialize schema generator.
 
         Args:
-            output_dir: Optional directory for output files (defaults to settings.sql_dir)
+            output_dir: Optional directory for output files (defaults to package sql dir)
         """
-        self.output_dir = output_dir or Path(settings.sql_dir)
+        self.output_dir = output_dir or get_package_sql_dir()
         self.schemas: dict[str, dict] = {}
 
     def discover_models(self, directory: str | Path) -> dict[str, Type[BaseModel]]:
@@ -234,6 +413,14 @@ class SchemaGenerator:
             create_kv_trigger=True,
         )
 
+        # Extract schema metadata for schemas table entry
+        schema_metadata = extract_model_schema_metadata(
+            model=model,
+            table_name=table_name,
+            entity_key_field=entity_key_field,
+        )
+        schema["schema_metadata"] = schema_metadata
+
         self.schemas[table_name] = schema
         return schema
 
@@ -343,6 +530,7 @@ class SchemaGenerator:
             "-- 2. Embeddings tables (embeddings_<table>)",
             "-- 3. KV_STORE triggers for cache maintenance",
             "-- 4. Indexes (foreground only, background indexes separate)",
+            "-- 5. Schema table entries (for agent-like table access)",
             "",
             "-- ============================================================================",
             "-- PREREQUISITES CHECK",
388
576
  sql_parts.append(schema["sql"]["kv_trigger"])
389
577
  sql_parts.append("")
390
578
 
579
+ # Add schema table entries (every entity table is also an "agent")
580
+ sql_parts.append("-- ============================================================================")
581
+ sql_parts.append("-- SCHEMA TABLE ENTRIES")
582
+ sql_parts.append("-- Every entity table gets a schemas entry for agent-like access")
583
+ sql_parts.append("-- ============================================================================")
584
+ sql_parts.append("")
585
+
586
+ for table_name, schema in self.schemas.items():
587
+ if "schema_metadata" in schema:
588
+ schema_upsert = generate_schema_upsert_sql(schema["schema_metadata"])
589
+ sql_parts.append(schema_upsert)
590
+ sql_parts.append("")
591
+
391
592
  # Add migration record
392
593
  sql_parts.append("-- ============================================================================")
393
594
  sql_parts.append("-- RECORD MIGRATION")
rem/services/session/compression.py

@@ -170,12 +170,16 @@ class SessionMessageStore:
         entity_key = truncate_key(f"session-{session_id}-msg-{message_index}")
 
         # Create Message entity for assistant response
+        # Use pre-generated id from message dict if available (for frontend feedback)
         msg = Message(
+            id=message.get("id"),  # Use pre-generated ID if provided
             content=message.get("content", ""),
             message_type=message.get("role", "assistant"),
             session_id=session_id,
             tenant_id=self.user_id,  # Set tenant_id to user_id (application scoped to user)
             user_id=user_id or self.user_id,
+            trace_id=message.get("trace_id"),
+            span_id=message.get("span_id"),
             metadata={
                 "message_index": message_index,
                 "entity_key": entity_key,  # Store entity key for LOOKUP
@@ -284,11 +288,14 @@ class SessionMessageStore:
         # Short assistant messages, user messages, and system messages stored as-is
         # Store ALL messages in database for full audit trail
         msg = Message(
+            id=message.get("id"),  # Use pre-generated ID if provided
             content=content,
             message_type=message.get("role", "user"),
             session_id=session_id,
             tenant_id=self.user_id,  # Set tenant_id to user_id (application scoped to user)
             user_id=user_id or self.user_id,
+            trace_id=message.get("trace_id"),
+            span_id=message.get("span_id"),
             metadata={
                 "message_index": idx,
                 "timestamp": message.get("timestamp"),