remdb 0.3.114__py3-none-any.whl → 0.3.172__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- rem/agentic/agents/__init__.py +16 -0
- rem/agentic/agents/agent_manager.py +311 -0
- rem/agentic/agents/sse_simulator.py +2 -0
- rem/agentic/context.py +103 -5
- rem/agentic/context_builder.py +36 -9
- rem/agentic/mcp/tool_wrapper.py +161 -18
- rem/agentic/otel/setup.py +1 -0
- rem/agentic/providers/phoenix.py +371 -108
- rem/agentic/providers/pydantic_ai.py +172 -30
- rem/agentic/schema.py +8 -4
- rem/api/deps.py +3 -5
- rem/api/main.py +26 -4
- rem/api/mcp_router/resources.py +15 -10
- rem/api/mcp_router/server.py +11 -3
- rem/api/mcp_router/tools.py +418 -4
- rem/api/middleware/tracking.py +5 -5
- rem/api/routers/admin.py +218 -1
- rem/api/routers/auth.py +349 -6
- rem/api/routers/chat/completions.py +255 -7
- rem/api/routers/chat/models.py +81 -7
- rem/api/routers/chat/otel_utils.py +33 -0
- rem/api/routers/chat/sse_events.py +17 -1
- rem/api/routers/chat/streaming.py +126 -19
- rem/api/routers/feedback.py +134 -14
- rem/api/routers/messages.py +24 -15
- rem/api/routers/query.py +6 -3
- rem/auth/__init__.py +13 -3
- rem/auth/jwt.py +352 -0
- rem/auth/middleware.py +115 -10
- rem/auth/providers/__init__.py +4 -1
- rem/auth/providers/email.py +215 -0
- rem/cli/commands/README.md +42 -0
- rem/cli/commands/cluster.py +617 -168
- rem/cli/commands/configure.py +4 -7
- rem/cli/commands/db.py +66 -22
- rem/cli/commands/experiments.py +468 -76
- rem/cli/commands/schema.py +6 -5
- rem/cli/commands/session.py +336 -0
- rem/cli/dreaming.py +2 -2
- rem/cli/main.py +2 -0
- rem/config.py +8 -1
- rem/models/core/experiment.py +58 -14
- rem/models/entities/__init__.py +4 -0
- rem/models/entities/ontology.py +1 -1
- rem/models/entities/ontology_config.py +1 -1
- rem/models/entities/subscriber.py +175 -0
- rem/models/entities/user.py +1 -0
- rem/schemas/agents/core/agent-builder.yaml +235 -0
- rem/schemas/agents/examples/contract-analyzer.yaml +1 -1
- rem/schemas/agents/examples/contract-extractor.yaml +1 -1
- rem/schemas/agents/examples/cv-parser.yaml +1 -1
- rem/services/__init__.py +3 -1
- rem/services/content/service.py +4 -3
- rem/services/email/__init__.py +10 -0
- rem/services/email/service.py +513 -0
- rem/services/email/templates.py +360 -0
- rem/services/phoenix/client.py +59 -18
- rem/services/postgres/README.md +38 -0
- rem/services/postgres/diff_service.py +127 -6
- rem/services/postgres/pydantic_to_sqlalchemy.py +45 -13
- rem/services/postgres/repository.py +5 -4
- rem/services/postgres/schema_generator.py +205 -4
- rem/services/session/compression.py +120 -50
- rem/services/session/reload.py +14 -7
- rem/services/user_service.py +41 -9
- rem/settings.py +442 -23
- rem/sql/migrations/001_install.sql +156 -0
- rem/sql/migrations/002_install_models.sql +1951 -88
- rem/sql/migrations/004_cache_system.sql +548 -0
- rem/sql/migrations/005_schema_update.sql +145 -0
- rem/utils/README.md +45 -0
- rem/utils/__init__.py +18 -0
- rem/utils/files.py +157 -1
- rem/utils/schema_loader.py +139 -10
- rem/utils/sql_paths.py +146 -0
- rem/utils/vision.py +1 -1
- rem/workers/__init__.py +3 -1
- rem/workers/db_listener.py +579 -0
- rem/workers/unlogged_maintainer.py +463 -0
- {remdb-0.3.114.dist-info → remdb-0.3.172.dist-info}/METADATA +218 -180
- {remdb-0.3.114.dist-info → remdb-0.3.172.dist-info}/RECORD +83 -68
- {remdb-0.3.114.dist-info → remdb-0.3.172.dist-info}/WHEEL +0 -0
- {remdb-0.3.114.dist-info → remdb-0.3.172.dist-info}/entry_points.txt +0 -0
rem/api/routers/chat/completions.py
CHANGED

@@ -1,13 +1,94 @@
 """
 OpenAI-compatible chat completions router for REM.

-
-
+Quick Start (Local Development)
+===============================
+
+NOTE: Local dev uses LOCAL databases (Postgres via Docker Compose on port 5050).
+Do NOT port-forward databases. Only port-forward observability services.
+
+IMPORTANT: Session IDs MUST be UUIDs. Non-UUID session IDs will cause message
+storage issues and feedback will not work correctly.
+
+1. Port Forwarding (REQUIRED for trace capture and Phoenix sync):
+
+    # Terminal 1: OTEL Collector (HTTP) - sends traces to Phoenix
+    kubectl port-forward -n observability svc/otel-collector-collector 4318:4318
+
+    # Terminal 2: Phoenix UI - view traces at http://localhost:6006
+    kubectl port-forward -n siggy svc/phoenix 6006:6006
+
+2. Get Phoenix API Key (REQUIRED for feedback->Phoenix sync):
+
+    export PHOENIX_API_KEY=$(kubectl get secret -n siggy rem-phoenix-api-key \\
+        -o jsonpath='{.data.PHOENIX_API_KEY}' | base64 -d)
+
+3. Start API with OTEL and Phoenix enabled:
+
+    cd /path/to/remstack/rem
+    source .venv/bin/activate
+    OTEL__ENABLED=true \\
+    PHOENIX__ENABLED=true \\
+    PHOENIX_API_KEY="$PHOENIX_API_KEY" \\
+    uvicorn rem.api.main:app --host 0.0.0.0 --port 8000 --app-dir src
+
+4. Test Chat Request (session_id MUST be a UUID):
+
+    SESSION_ID=$(python3 -c "import uuid; print(uuid.uuid4())")
+    curl -s -N -X POST http://localhost:8000/api/v1/chat/completions \\
+        -H 'Content-Type: application/json' \\
+        -H "X-Session-Id: $SESSION_ID" \\
+        -H 'X-Agent-Schema: rem' \\
+        -d '{"messages": [{"role": "user", "content": "Hello"}], "stream": true}'
+
+    # Note: Use 'rem' agent schema (default) for real LLM responses.
+    # The 'simulator' agent is for testing SSE events without LLM calls.
+
+5. Submit Feedback on Response:
+
+    The metadata SSE event contains message_id and trace_id for feedback:
+    event: metadata
+    data: {"message_id": "728882f8-...", "trace_id": "e53c701c...", ...}
+
+    Use session_id (UUID you generated) and message_id to submit feedback:
+
+    curl -X POST http://localhost:8000/api/v1/messages/feedback \\
+        -H 'Content-Type: application/json' \\
+        -H 'X-Tenant-Id: default' \\
+        -d '{
+            "session_id": "<your-uuid-session-id>",
+            "message_id": "<message-id-from-metadata>",
+            "rating": 1,
+            "categories": ["helpful"],
+            "comment": "Good response"
+        }'
+
+    Expected response (201 = synced to Phoenix):
+    {"phoenix_synced": true, "trace_id": "e53c701c...", "span_id": "6432d497..."}
+
+OTEL Architecture
+=================
+
+    REM API --[OTLP/HTTP]--> OTEL Collector --[relay]--> Phoenix
+    (port 4318)              (k8s: observability)       (k8s: siggy)
+
+Environment Variables:
+    OTEL__ENABLED=true        Enable OTEL tracing (required for trace capture)
+    PHOENIX__ENABLED=true     Enable Phoenix integration (required for feedback sync)
+    PHOENIX_API_KEY=<jwt>     Phoenix API key (required for feedback->Phoenix sync)
+    OTEL__COLLECTOR_ENDPOINT  Default: http://localhost:4318
+    OTEL__PROTOCOL            Default: http (use port 4318, not gRPC 4317)
+
+Design Pattern
+==============
+
+- Headers map to AgentContext (X-User-Id, X-Tenant-Id, X-Session-Id, X-Agent-Schema, X-Is-Eval)
 - ContextBuilder centralizes message construction with user profile + session history
 - Body.model is the LLM model for Pydantic AI
 - X-Agent-Schema header specifies which agent schema to use (defaults to 'rem')
 - Support for streaming (SSE) and non-streaming modes
 - Response format control (text vs json_object)
+- OpenAI-compatible body fields: metadata, store, reasoning_effort, etc.

 Context Building Flow:
 1. ContextBuilder.build_from_headers() extracts user_id, session_id from headers
@@ -16,7 +97,7 @@ Context Building Flow:
    - Long messages include REM LOOKUP hints: "... [REM LOOKUP session-{id}-msg-{index}] ..."
    - Agent can retrieve full content on-demand using REM LOOKUP
 3. User profile provided as REM LOOKUP hint (on-demand by default)
-   - Agent receives: "User
+   - Agent receives: "User: {email}. To load user profile: Use REM LOOKUP \"{email}\""
    - Agent decides whether to load profile based on query
 4. If CHAT__AUTO_INJECT_USER_CONTEXT=true: User profile auto-loaded and injected
 5. Combines: system context + compressed session history + new messages
@@ -25,9 +106,10 @@ Context Building Flow:
 Headers Mapping
     X-User-Id → AgentContext.user_id
     X-Tenant-Id → AgentContext.tenant_id
-    X-Session-Id → AgentContext.session_id
+    X-Session-Id → AgentContext.session_id (use UUID for new sessions)
     X-Model-Name → AgentContext.default_model (overrides body.model)
     X-Agent-Schema → AgentContext.agent_schema_uri (defaults to 'rem')
+    X-Is-Eval → AgentContext.is_eval (sets session mode to EVALUATION)

 Default Agent:
     If X-Agent-Schema header is not provided, the system loads 'rem' schema,
@@ -42,6 +124,7 @@ Example Request:
     POST /api/v1/chat/completions
     X-Tenant-Id: acme-corp
     X-User-Id: user123
+    X-Session-Id: a1b2c3d4-e5f6-7890-abcd-ef1234567890  # UUID
     X-Agent-Schema: rem  # Optional, this is the default

     {
@@ -67,7 +150,9 @@ from loguru import logger
 from ....agentic.context import AgentContext
 from ....agentic.context_builder import ContextBuilder
 from ....agentic.providers.pydantic_ai import create_agent
+from ....models.entities.session import Session, SessionMode
 from ....services.audio.transcriber import AudioTranscriber
+from ....services.postgres.repository import Repository
 from ....services.session import SessionMessageStore, reload_session
 from ....settings import settings
 from ....utils.schema_loader import load_agent_schema, load_agent_schema_async
@@ -87,6 +172,105 @@ router = APIRouter(prefix="/api/v1", tags=["chat"])
 DEFAULT_AGENT_SCHEMA = "rem"


+def get_current_trace_context() -> tuple[str | None, str | None]:
+    """Get trace_id and span_id from current OTEL context.
+
+    Returns:
+        Tuple of (trace_id, span_id) as hex strings, or (None, None) if not available.
+    """
+    try:
+        from opentelemetry import trace
+        span = trace.get_current_span()
+        if span and span.get_span_context().is_valid:
+            ctx = span.get_span_context()
+            trace_id = format(ctx.trace_id, '032x')
+            span_id = format(ctx.span_id, '016x')
+            return trace_id, span_id
+    except Exception:
+        pass
+    return None, None
+
+
+def get_tracer():
+    """Get the OpenTelemetry tracer for chat completions."""
+    try:
+        from opentelemetry import trace
+        return trace.get_tracer("rem.chat.completions")
+    except Exception:
+        return None
+
+
+async def ensure_session_with_metadata(
+    session_id: str,
+    user_id: str | None,
+    tenant_id: str,
+    is_eval: bool,
+    request_metadata: dict[str, str] | None,
+    agent_schema: str | None = None,
+) -> None:
+    """
+    Ensure session exists and update with metadata/mode.
+
+    If X-Is-Eval header is true, sets session mode to EVALUATION.
+    Merges request metadata with existing session metadata.
+
+    Args:
+        session_id: Session identifier (maps to Session.name)
+        user_id: User identifier
+        tenant_id: Tenant identifier
+        is_eval: Whether this is an evaluation session
+        request_metadata: Metadata from request body to merge
+        agent_schema: Optional agent schema being used
+    """
+    if not settings.postgres.enabled:
+        return
+
+    try:
+        repo = Repository(Session, table_name="sessions")
+
+        # Try to load existing session by name (session_id is the name field)
+        existing_list = await repo.find(
+            filters={"name": session_id, "tenant_id": tenant_id},
+            limit=1,
+        )
+        existing = existing_list[0] if existing_list else None
+
+        if existing:
+            # Merge metadata if provided
+            merged_metadata = existing.metadata or {}
+            if request_metadata:
+                merged_metadata.update(request_metadata)
+
+            # Update session if eval flag or new metadata
+            needs_update = False
+            if is_eval and existing.mode != SessionMode.EVALUATION:
+                existing.mode = SessionMode.EVALUATION
+                needs_update = True
+            if request_metadata:
+                existing.metadata = merged_metadata
+                needs_update = True
+
+            if needs_update:
+                await repo.upsert(existing)
+                logger.debug(f"Updated session {session_id} (eval={is_eval}, metadata keys={list(merged_metadata.keys())})")
+        else:
+            # Create new session
+            session = Session(
+                name=session_id,
+                mode=SessionMode.EVALUATION if is_eval else SessionMode.NORMAL,
+                user_id=user_id,
+                tenant_id=tenant_id,
+                agent_schema_uri=agent_schema,
+                metadata=request_metadata or {},
+            )
+            await repo.upsert(session)
+            logger.info(f"Created session {session_id} (eval={is_eval})")
+
+    except Exception as e:
+        # Non-critical - log but don't fail the request
+        logger.error(f"Failed to ensure session metadata: {e}", exc_info=True)
+
+
 @router.post("/chat/completions", response_model=None)
 async def chat_completions(body: ChatCompletionRequest, request: Request):
     """
@@ -102,6 +286,17 @@ async def chat_completions(body: ChatCompletionRequest, request: Request):
     | X-Tenant-Id | Tenant identifier (multi-tenancy) | AgentContext.tenant_id | "default" |
     | X-Session-Id | Session/conversation identifier | AgentContext.session_id | None |
     | X-Agent-Schema | Agent schema name | AgentContext.agent_schema_uri | "rem" |
+    | X-Is-Eval | Mark as evaluation session | AgentContext.is_eval | false |
+
+    Additional OpenAI-compatible Body Fields:
+    - metadata: Key-value pairs merged with session metadata (max 16 keys)
+    - store: Whether to store for distillation/evaluation
+    - max_completion_tokens: Max tokens to generate (replaces max_tokens)
+    - seed: Seed for deterministic sampling
+    - top_p: Nucleus sampling probability
+    - logprobs: Return log probabilities
+    - reasoning_effort: low/medium/high for o-series models
+    - service_tier: auto/flex/priority/default

     Example Models:
     - anthropic:claude-sonnet-4-5-20250929 (Claude 4.5 Sonnet)
@@ -127,10 +322,16 @@ async def chat_completions(body: ChatCompletionRequest, request: Request):
     - If CHAT__AUTO_INJECT_USER_CONTEXT=true: User profile auto-loaded and injected
     - New messages saved to database with compression for session continuity
     - When Postgres is disabled, session management is skipped
+
+    Evaluation Sessions:
+    - Set X-Is-Eval: true header to mark session as evaluation
+    - Session mode will be set to EVALUATION
+    - Request metadata is merged with session metadata
+    - Useful for A/B testing, model comparison, and feedback collection
     """
     # Load agent schema: use header value from context or default
-    # Extract AgentContext
-    temp_context = AgentContext.
+    # Extract AgentContext from request (gets user_id from JWT token)
+    temp_context = AgentContext.from_request(request)
     schema_name = temp_context.agent_schema_uri or DEFAULT_AGENT_SCHEMA

     # Resolve model: use body.model if provided, otherwise settings default
@@ -149,8 +350,20 @@ async def chat_completions(body: ChatCompletionRequest, request: Request):
         context, messages = await ContextBuilder.build_from_headers(
             headers=dict(request.headers),
             new_messages=new_messages,
+            user_id=temp_context.user_id,  # From JWT token (source of truth)
         )

+        # Ensure session exists with metadata and eval mode if applicable
+        if context.session_id:
+            await ensure_session_with_metadata(
+                session_id=context.session_id,
+                user_id=context.user_id,
+                tenant_id=context.tenant_id,
+                is_eval=context.is_eval,
+                request_metadata=body.metadata,
+                agent_schema="simulator",
+            )
+
         # Get the last user message as prompt
         prompt = body.messages[-1].content if body.messages else "demo"
         request_id = f"sim-{uuid.uuid4().hex[:24]}"
@@ -297,10 +510,22 @@ async def chat_completions(body: ChatCompletionRequest, request: Request):
     context, messages = await ContextBuilder.build_from_headers(
         headers=dict(request.headers),
         new_messages=new_messages,
+        user_id=temp_context.user_id,  # From JWT token (source of truth)
     )

     logger.info(f"Built context with {len(messages)} total messages (includes history + user context)")

+    # Ensure session exists with metadata and eval mode if applicable
+    if context.session_id:
+        await ensure_session_with_metadata(
+            session_id=context.session_id,
+            user_id=context.user_id,
+            tenant_id=context.tenant_id,
+            is_eval=context.is_eval,
+            request_metadata=body.metadata,
+            agent_schema=schema_name,
+        )
+
     # Create agent with schema and model override
     agent = await create_agent(
         context=context,
@@ -351,7 +576,26 @@ async def chat_completions(body: ChatCompletionRequest, request: Request):
     )

     # Non-streaming mode
-
+    # Create a parent span to capture trace context for message storage
+    trace_id, span_id = None, None
+    tracer = get_tracer()
+
+    if tracer:
+        with tracer.start_as_current_span(
+            "chat_completion",
+            attributes={
+                "session.id": context.session_id or "",
+                "user.id": context.user_id or "",
+                "model": body.model,
+                "agent.schema": context.agent_schema_uri or DEFAULT_AGENT_SCHEMA,
+            }
+        ) as span:
+            # Capture trace context from the span we just created
+            trace_id, span_id = get_current_trace_context()
+            result = await agent.run(prompt)
+    else:
+        # No tracer available, run without tracing
+        result = await agent.run(prompt)

     # Determine content format based on response_format request
     if body.response_format and body.response_format.type == "json_object":
@@ -374,12 +618,16 @@ async def chat_completions(body: ChatCompletionRequest, request: Request):
         "role": "user",
         "content": body.messages[-1].content if body.messages else "",
         "timestamp": datetime.utcnow().isoformat(),
+        "trace_id": trace_id,
+        "span_id": span_id,
     }

     assistant_message = {
         "role": "assistant",
         "content": content,
         "timestamp": datetime.utcnow().isoformat(),
+        "trace_id": trace_id,
+        "span_id": span_id,
     }

     try:
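Taken together, these changes add an evaluation-session flow to the router: the X-Is-Eval header flips the session to EVALUATION mode and body metadata is merged into the stored session. A minimal client sketch for exercising that flow, assuming a local API on port 8000 and the httpx library (neither is part of this diff):

import json
import uuid

import httpx

session_id = str(uuid.uuid4())  # session IDs must be UUIDs

headers = {
    "X-Session-Id": session_id,
    "X-Tenant-Id": "default",
    "X-Agent-Schema": "rem",
    "X-Is-Eval": "true",  # session mode becomes EVALUATION
}
body = {
    "messages": [{"role": "user", "content": "Hello"}],
    "stream": False,  # exercise the new non-streaming span-wrapping path
    "metadata": {"experiment": "prompt-v2"},  # merged into session metadata
}

# json= sets the Content-Type header; model falls back to the settings default
resp = httpx.post(
    "http://localhost:8000/api/v1/chat/completions",
    headers=headers,
    json=body,
    timeout=60,
)
print(resp.status_code)
print(json.dumps(resp.json(), indent=2)[:500])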
rem/api/routers/chat/models.py
CHANGED

@@ -1,14 +1,38 @@
 """
 OpenAI-compatible API models for chat completions.

-Design Pattern
+Design Pattern:
 - Full OpenAI compatibility for drop-in replacement
 - Support for streaming (SSE) and non-streaming modes
 - Response format control (text vs json_object)
-- Headers map to AgentContext
+- Headers map to AgentContext for session/context control
+- Body fields for OpenAI-compatible parameters + metadata
+
+Headers (context control):
+    X-User-Id → context.user_id (user identifier)
+    X-Tenant-Id → context.tenant_id (multi-tenancy, default: "default")
+    X-Session-Id → context.session_id (conversation continuity)
+    X-Agent-Schema → context.agent_schema_uri (which agent to use, default: "rem")
+    X-Model-Name → context.default_model (model override)
+    X-Chat-Is-Audio → triggers audio transcription ("true"/"false")
+    X-Is-Eval → context.is_eval (marks session as evaluation, sets mode=EVALUATION)
+
+Body Fields (OpenAI-compatible + extensions):
+    model → LLM model (e.g., "openai:gpt-4.1", "anthropic:claude-sonnet-4-5-20250929")
+    messages → Chat conversation history
+    temperature → Sampling temperature (0-2)
+    max_tokens → Max tokens (deprecated, use max_completion_tokens)
+    max_completion_tokens → Max tokens to generate
+    stream → Enable SSE streaming
+    metadata → Key-value pairs merged with session metadata (for evals/experiments)
+    store → Whether to store for distillation/evaluation
+    seed → Deterministic sampling seed
+    top_p → Nucleus sampling probability
+    reasoning_effort → low/medium/high for o-series models
+    service_tier → auto/flex/priority/default
 """

-from typing import Literal
+from typing import Any, Literal

 from pydantic import BaseModel, Field

@@ -46,10 +70,17 @@ class ChatCompletionRequest(BaseModel):
     Compatible with OpenAI's /v1/chat/completions endpoint.

     Headers Map to AgentContext:
-
-
-
-
+        X-User-Id → context.user_id
+        X-Tenant-Id → context.tenant_id (default: "default")
+        X-Session-Id → context.session_id
+        X-Agent-Schema → context.agent_schema_uri (default: "rem")
+        X-Model-Name → context.default_model
+        X-Chat-Is-Audio → triggers audio transcription
+        X-Is-Eval → context.is_eval (sets session mode=EVALUATION)
+
+    Body Fields for Metadata/Evals:
+        metadata → Key-value pairs merged with session metadata
+        store → Whether to store for distillation/evaluation

     Note: Model is specified in body.model (standard OpenAI field), not headers.
     """
@@ -73,6 +104,49 @@ class ChatCompletionRequest(BaseModel):
         default=None,
         description="Response format. Set type='json_object' to enable JSON mode.",
     )
+    # Additional OpenAI-compatible fields
+    metadata: dict[str, str] | None = Field(
+        default=None,
+        description="Key-value pairs attached to the request (max 16 keys, 64/512 char limits). "
+        "Merged with session metadata for persistence.",
+    )
+    store: bool | None = Field(
+        default=None,
+        description="Whether to store for distillation/evaluation purposes.",
+    )
+    max_completion_tokens: int | None = Field(
+        default=None,
+        ge=1,
+        description="Max tokens to generate (replaces deprecated max_tokens).",
+    )
+    seed: int | None = Field(
+        default=None,
+        description="Seed for deterministic sampling (best effort).",
+    )
+    top_p: float | None = Field(
+        default=None,
+        ge=0,
+        le=1,
+        description="Nucleus sampling probability. Use temperature OR top_p, not both.",
+    )
+    logprobs: bool | None = Field(
+        default=None,
+        description="Whether to return log probabilities for output tokens.",
+    )
+    top_logprobs: int | None = Field(
+        default=None,
+        ge=0,
+        le=20,
+        description="Number of most likely tokens to return at each position (requires logprobs=true).",
+    )
+    reasoning_effort: Literal["low", "medium", "high"] | None = Field(
+        default=None,
+        description="Reasoning effort for o-series models (low/medium/high).",
+    )
+    service_tier: Literal["auto", "flex", "priority", "default"] | None = Field(
+        default=None,
+        description="Service tier for processing (flex is 50% cheaper but slower).",
+    )


 # Response models
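With the new fields in place, a request can carry the OpenAI-compatible extensions directly on the model. A sketch, assuming the import path matches the file location shown, that messages coerce from role/content dicts, and Pydantic v2's model_dump:

from rem.api.routers.chat.models import ChatCompletionRequest

req = ChatCompletionRequest(
    model="openai:gpt-4.1",
    messages=[{"role": "user", "content": "Summarize this contract."}],
    stream=True,
    metadata={"experiment": "ab-test-42", "variant": "b"},  # max 16 keys
    store=True,
    max_completion_tokens=512,
    seed=7,
    reasoning_effort="medium",
    service_tier="flex",
)
# Only the fields actually set end up in the outgoing payload
print(req.model_dump(exclude_none=True))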
rem/api/routers/chat/otel_utils.py
ADDED

@@ -0,0 +1,33 @@
+"""OTEL utilities for chat routers."""
+
+from loguru import logger
+
+
+def get_tracer():
+    """Get the OpenTelemetry tracer for chat completions."""
+    try:
+        from opentelemetry import trace
+        return trace.get_tracer("rem.chat.completions")
+    except Exception:
+        return None
+
+
+def get_current_trace_context() -> tuple[str | None, str | None]:
+    """Get trace_id and span_id from current OTEL context.
+
+    Returns:
+        Tuple of (trace_id, span_id) as hex strings, or (None, None) if not available.
+    """
+    try:
+        from opentelemetry import trace
+
+        span = trace.get_current_span()
+        ctx = span.get_span_context()
+        if ctx.is_valid:
+            trace_id = format(ctx.trace_id, '032x')
+            span_id = format(ctx.span_id, '016x')
+            return trace_id, span_id
+    except Exception as e:
+        logger.debug(f"Could not get trace context: {e}")
+
+    return None, None
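Both helpers degrade gracefully when OpenTelemetry is missing or no span is active. A usage sketch, with the import path assumed from the file location above:

from rem.api.routers.chat.otel_utils import get_current_trace_context, get_tracer

trace_id, span_id = None, None
tracer = get_tracer()  # None if opentelemetry is not installed/configured

if tracer:
    with tracer.start_as_current_span("chat_completion"):
        # Inside an active span the context is valid and the ids come back
        # as 32- and 16-character lowercase hex strings
        trace_id, span_id = get_current_trace_context()

print(trace_id, span_id)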
rem/api/routers/chat/sse_events.py
CHANGED

@@ -321,7 +321,13 @@ class MetadataEvent(BaseModel):
     # Agent info
     agent_schema: str | None = Field(
         default=None,
-        description="Name of the agent schema used for this response (e.g., 'rem', '
+        description="Name of the agent schema used for this response (e.g., 'rem', 'query-assistant')"
+    )
+
+    # Session info
+    session_name: str | None = Field(
+        default=None,
+        description="Short 1-3 phrase name for the session topic (e.g., 'Prescription Drug Questions', 'AWS Setup Help')"
     )

     # Quality indicators
@@ -350,6 +356,16 @@ class MetadataEvent(BaseModel):
         description="Token count for this response"
     )

+    # Trace context for observability (deterministic, captured from OTEL)
+    trace_id: str | None = Field(
+        default=None,
+        description="OTEL trace ID for correlating with Phoenix/observability systems"
+    )
+    span_id: str | None = Field(
+        default=None,
+        description="OTEL span ID for correlating with Phoenix/observability systems"
+    )
+
     # System flags
     flags: list[str] | None = Field(
         default=None,