remdb 0.3.0__py3-none-any.whl → 0.3.114__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of remdb might be problematic.

Files changed (98)
  1. rem/__init__.py +129 -2
  2. rem/agentic/README.md +76 -0
  3. rem/agentic/__init__.py +15 -0
  4. rem/agentic/agents/__init__.py +16 -2
  5. rem/agentic/agents/sse_simulator.py +500 -0
  6. rem/agentic/context.py +28 -22
  7. rem/agentic/llm_provider_models.py +301 -0
  8. rem/agentic/otel/setup.py +92 -4
  9. rem/agentic/providers/phoenix.py +32 -43
  10. rem/agentic/providers/pydantic_ai.py +142 -22
  11. rem/agentic/schema.py +358 -21
  12. rem/agentic/tools/rem_tools.py +3 -3
  13. rem/api/README.md +238 -1
  14. rem/api/deps.py +255 -0
  15. rem/api/main.py +151 -37
  16. rem/api/mcp_router/resources.py +1 -1
  17. rem/api/mcp_router/server.py +17 -2
  18. rem/api/mcp_router/tools.py +143 -7
  19. rem/api/middleware/tracking.py +172 -0
  20. rem/api/routers/admin.py +277 -0
  21. rem/api/routers/auth.py +124 -0
  22. rem/api/routers/chat/completions.py +152 -16
  23. rem/api/routers/chat/models.py +7 -3
  24. rem/api/routers/chat/sse_events.py +526 -0
  25. rem/api/routers/chat/streaming.py +608 -45
  26. rem/api/routers/dev.py +81 -0
  27. rem/api/routers/feedback.py +148 -0
  28. rem/api/routers/messages.py +473 -0
  29. rem/api/routers/models.py +78 -0
  30. rem/api/routers/query.py +357 -0
  31. rem/api/routers/shared_sessions.py +406 -0
  32. rem/auth/middleware.py +126 -27
  33. rem/cli/commands/README.md +201 -70
  34. rem/cli/commands/ask.py +13 -10
  35. rem/cli/commands/cluster.py +1359 -0
  36. rem/cli/commands/configure.py +4 -3
  37. rem/cli/commands/db.py +350 -137
  38. rem/cli/commands/experiments.py +76 -72
  39. rem/cli/commands/process.py +22 -15
  40. rem/cli/commands/scaffold.py +47 -0
  41. rem/cli/commands/schema.py +95 -49
  42. rem/cli/main.py +29 -6
  43. rem/config.py +2 -2
  44. rem/models/core/core_model.py +7 -1
  45. rem/models/core/rem_query.py +5 -2
  46. rem/models/entities/__init__.py +21 -0
  47. rem/models/entities/domain_resource.py +38 -0
  48. rem/models/entities/feedback.py +123 -0
  49. rem/models/entities/message.py +30 -1
  50. rem/models/entities/session.py +83 -0
  51. rem/models/entities/shared_session.py +180 -0
  52. rem/models/entities/user.py +10 -3
  53. rem/registry.py +373 -0
  54. rem/schemas/agents/rem.yaml +7 -3
  55. rem/services/content/providers.py +94 -140
  56. rem/services/content/service.py +92 -20
  57. rem/services/dreaming/affinity_service.py +2 -16
  58. rem/services/dreaming/moment_service.py +2 -15
  59. rem/services/embeddings/api.py +24 -17
  60. rem/services/embeddings/worker.py +16 -16
  61. rem/services/phoenix/EXPERIMENT_DESIGN.md +3 -3
  62. rem/services/phoenix/client.py +252 -19
  63. rem/services/postgres/README.md +159 -15
  64. rem/services/postgres/__init__.py +2 -1
  65. rem/services/postgres/diff_service.py +426 -0
  66. rem/services/postgres/pydantic_to_sqlalchemy.py +427 -129
  67. rem/services/postgres/repository.py +132 -0
  68. rem/services/postgres/schema_generator.py +86 -5
  69. rem/services/postgres/service.py +6 -6
  70. rem/services/rate_limit.py +113 -0
  71. rem/services/rem/README.md +14 -0
  72. rem/services/rem/parser.py +44 -9
  73. rem/services/rem/service.py +36 -2
  74. rem/services/session/compression.py +17 -1
  75. rem/services/session/reload.py +1 -1
  76. rem/services/user_service.py +98 -0
  77. rem/settings.py +169 -17
  78. rem/sql/background_indexes.sql +21 -16
  79. rem/sql/migrations/001_install.sql +231 -54
  80. rem/sql/migrations/002_install_models.sql +457 -393
  81. rem/sql/migrations/003_optional_extensions.sql +326 -0
  82. rem/utils/constants.py +97 -0
  83. rem/utils/date_utils.py +228 -0
  84. rem/utils/embeddings.py +17 -4
  85. rem/utils/files.py +167 -0
  86. rem/utils/mime_types.py +158 -0
  87. rem/utils/model_helpers.py +156 -1
  88. rem/utils/schema_loader.py +191 -35
  89. rem/utils/sql_types.py +3 -1
  90. rem/utils/vision.py +9 -14
  91. rem/workers/README.md +14 -14
  92. rem/workers/db_maintainer.py +74 -0
  93. {remdb-0.3.0.dist-info → remdb-0.3.114.dist-info}/METADATA +303 -164
  94. {remdb-0.3.0.dist-info → remdb-0.3.114.dist-info}/RECORD +96 -70
  95. {remdb-0.3.0.dist-info → remdb-0.3.114.dist-info}/WHEEL +1 -1
  96. rem/sql/002_install_models.sql +0 -1068
  97. rem/sql/install_models.sql +0 -1038
  98. {remdb-0.3.0.dist-info → remdb-0.3.114.dist-info}/entry_points.txt +0 -0
rem/agentic/agents/sse_simulator.py ADDED
@@ -0,0 +1,500 @@
+"""
+SSE Event Simulator Agent.
+
+A programmatic simulator that generates rich SSE events for testing and
+demonstrating the streaming protocol. NOT an LLM-based agent - this is
+pure Python that emits scripted SSE events.
+
+Usage:
+    from rem.agentic.agents.sse_simulator import stream_simulator_events
+
+    async for event in stream_simulator_events("demo"):
+        yield event
+
+The simulator demonstrates:
+1. Reasoning events (thinking process)
+2. Text deltas (streamed content)
+3. Progress indicators
+4. Tool call events
+5. Action solicitations (user interaction)
+6. Metadata events
+7. Done event
+
+This is useful for:
+- Frontend development without LLM costs
+- Testing SSE parsing and rendering
+- Demonstrating the full event protocol
+- Load testing streaming infrastructure
+"""
+
+import asyncio
+import time
+import uuid
+from typing import AsyncGenerator
+
+from rem.api.routers.chat.sse_events import (
+    ReasoningEvent,
+    ActionRequestEvent,
+    MetadataEvent,
+    ProgressEvent,
+    ToolCallEvent,
+    DoneEvent,
+    ActionRequestCard,
+    ActionSubmit,
+    ActionStyle,
+    InputText,
+    InputChoiceSet,
+    ActionDisplayStyle,
+    format_sse_event,
+)
+from rem.api.routers.chat.models import (
+    ChatCompletionStreamResponse,
+    ChatCompletionStreamChoice,
+    ChatCompletionMessageDelta,
+)
+
+
+# =============================================================================
+# Demo Content
+# =============================================================================
+
+DEMO_REASONING_STEPS = [
+    "Analyzing the user's request...",
+    "Considering the best approach to demonstrate SSE events...",
+    "Planning a response that showcases all event types...",
+    "Preparing rich markdown content with examples...",
+]
+
+DEMO_MARKDOWN_CONTENT = """# SSE Streaming Demo
+
+This response demonstrates the **rich SSE event protocol** with multiple event types streamed in real-time.
+
+## What You're Seeing
+
+1. **Reasoning Events** - The "thinking" process shown in a collapsible section
+2. **Text Streaming** - This markdown content, streamed word by word
+3. **Progress Events** - Step indicators during processing
+4. **Tool Calls** - Simulated tool invocations
+5. **Action Requests** - Interactive UI elements for user input
+
+## Code Example
+
+```python
+from rem.agentic.agents.sse_simulator import stream_simulator_events
+
+async def demo():
+    async for event in stream_simulator_events("demo"):
+        print(event)
+```
+
+## Features Table
+
+| Event Type | Purpose | UI Display |
+|------------|---------|------------|
+| `reasoning` | Model thinking | Collapsible section |
+| `text_delta` | Content chunks | Main response area |
+| `progress` | Step indicators | Progress bar |
+| `tool_call` | Tool invocations | Tool status panel |
+| `action_request` | User input | Buttons/forms |
+| `metadata` | System info | Hidden or badge |
+
+## Summary
+
+The SSE protocol enables rich, interactive AI experiences beyond simple text streaming. Each event type serves a specific purpose in the UI.
+
+"""
+
+DEMO_TOOL_CALLS = [
+    ("search_knowledge", {"query": "SSE streaming best practices"}),
+    ("format_response", {"style": "markdown", "include_examples": True}),
+]
+
+DEMO_PROGRESS_STEPS = [
+    "Initializing response",
+    "Generating content",
+    "Formatting output",
+    "Preparing actions",
+]
+
+
+# =============================================================================
+# Simulator Functions
+# =============================================================================
+
+async def stream_simulator_events(
+    prompt: str,
+    delay_ms: int = 50,
+    include_reasoning: bool = True,
+    include_progress: bool = True,
+    include_tool_calls: bool = True,
+    include_actions: bool = True,
+    include_metadata: bool = True,
+    # Message correlation IDs
+    message_id: str | None = None,
+    in_reply_to: str | None = None,
+    session_id: str | None = None,
+    # Model info
+    model: str = "simulator-v1.0.0",
+) -> AsyncGenerator[str, None]:
+    """
+    Generate a sequence of SSE events simulating an AI response.
+
+    This is a programmatic simulator - no LLM calls are made.
+    Events are yielded in a realistic order with configurable delays.
+
+    Text content uses OpenAI-compatible format for consistency with real agents.
+    Other events (reasoning, progress, tool_call, metadata) use named SSE events.
+
+    Args:
+        prompt: User prompt (used to vary output slightly)
+        delay_ms: Delay between events in milliseconds
+        include_reasoning: Whether to emit reasoning events
+        include_progress: Whether to emit progress events
+        include_tool_calls: Whether to emit tool call events
+        include_actions: Whether to emit action request at end
+        include_metadata: Whether to emit metadata event
+        message_id: Database ID of the assistant message being streamed
+        in_reply_to: Database ID of the user message this responds to
+        session_id: Session ID for conversation correlation
+        model: Model name for response metadata
+
+    Yields:
+        SSE-formatted strings ready for HTTP streaming
+
+    Example:
+        ```python
+        async for sse_string in stream_simulator_events("demo"):
+            print(sse_string)
+        ```
+    """
+    delay = delay_ms / 1000.0
+    request_id = f"chatcmpl-{uuid.uuid4().hex[:24]}"
+    created_at = int(time.time())
+    is_first_chunk = True
+
+    # Phase 1: Reasoning events
+    if include_reasoning:
+        for i, step in enumerate(DEMO_REASONING_STEPS):
+            await asyncio.sleep(delay)
+            yield format_sse_event(ReasoningEvent(content=step + "\n", step=i + 1))
+
+    # Phase 2: Progress - Starting
+    if include_progress:
+        await asyncio.sleep(delay)
+        yield format_sse_event(ProgressEvent(
+            step=1,
+            total_steps=len(DEMO_PROGRESS_STEPS),
+            label=DEMO_PROGRESS_STEPS[0],
+            status="in_progress"
+        ))
+
+    # Phase 3: Tool calls
+    if include_tool_calls:
+        for tool_name, args in DEMO_TOOL_CALLS:
+            tool_id = f"call_{uuid.uuid4().hex[:8]}"
+
+            await asyncio.sleep(delay)
+            yield format_sse_event(ToolCallEvent(
+                tool_name=tool_name,
+                tool_id=tool_id,
+                status="started",
+                arguments=args
+            ))
+
+            await asyncio.sleep(delay * 3)  # Simulate tool execution
+            yield format_sse_event(ToolCallEvent(
+                tool_name=tool_name,
+                tool_id=tool_id,
+                status="completed",
+                result=f"Retrieved data for {tool_name}"
+            ))
+
+    # Phase 4: Progress - Generating
+    if include_progress:
+        await asyncio.sleep(delay)
+        yield format_sse_event(ProgressEvent(
+            step=2,
+            total_steps=len(DEMO_PROGRESS_STEPS),
+            label=DEMO_PROGRESS_STEPS[1],
+            status="in_progress"
+        ))
+
+    # Phase 5: Stream text content in OpenAI format
+    words = DEMO_MARKDOWN_CONTENT.split(" ")
+    buffer = ""
+    for i, word in enumerate(words):
+        buffer += word + " "
+        # Emit every few words to simulate realistic streaming
+        if len(buffer) > 20 or i == len(words) - 1:
+            await asyncio.sleep(delay)
+            # OpenAI-compatible format
+            chunk = ChatCompletionStreamResponse(
+                id=request_id,
+                created=created_at,
+                model=model,
+                choices=[
+                    ChatCompletionStreamChoice(
+                        index=0,
+                        delta=ChatCompletionMessageDelta(
+                            role="assistant" if is_first_chunk else None,
+                            content=buffer,
+                        ),
+                        finish_reason=None,
+                    )
+                ],
+            )
+            is_first_chunk = False
+            yield f"data: {chunk.model_dump_json()}\n\n"
+            buffer = ""
+
+    # Phase 6: Progress - Formatting
+    if include_progress:
+        await asyncio.sleep(delay)
+        yield format_sse_event(ProgressEvent(
+            step=3,
+            total_steps=len(DEMO_PROGRESS_STEPS),
+            label=DEMO_PROGRESS_STEPS[2],
+            status="in_progress"
+        ))
+
+    # Phase 7: Metadata (includes message correlation IDs)
+    if include_metadata:
+        await asyncio.sleep(delay)
+        yield format_sse_event(MetadataEvent(
+            # Message correlation IDs
+            message_id=message_id,
+            in_reply_to=in_reply_to,
+            session_id=session_id,
+            # Quality indicators
+            confidence=0.95,
+            sources=["rem/api/routers/chat/sse_events.py", "rem/agentic/agents/sse_simulator.py"],
+            # Model info
+            model_version=model,
+            # Performance metrics
+            latency_ms=int(len(words) * delay_ms),
+            token_count=len(words),
+            # System flags
+            flags=["demo_mode"],
+            hidden=False,
+            extra={"prompt_length": len(prompt)}
+        ))
+
+    # Phase 8: Progress - Preparing actions
+    if include_progress:
+        await asyncio.sleep(delay)
+        yield format_sse_event(ProgressEvent(
+            step=4,
+            total_steps=len(DEMO_PROGRESS_STEPS),
+            label=DEMO_PROGRESS_STEPS[3],
+            status="in_progress"
+        ))
+
+    # Phase 9: Action solicitation
+    if include_actions:
+        await asyncio.sleep(delay)
+        yield format_sse_event(ActionRequestEvent(
+            card=ActionRequestCard(
+                id=f"feedback-{uuid.uuid4().hex[:8]}",
+                prompt="Was this SSE demonstration helpful?",
+                display_style=ActionDisplayStyle.INLINE,
+                actions=[
+                    ActionSubmit(
+                        id="helpful-yes",
+                        title="Yes, very helpful!",
+                        style=ActionStyle.POSITIVE,
+                        data={"rating": 5, "feedback": "positive"}
+                    ),
+                    ActionSubmit(
+                        id="helpful-somewhat",
+                        title="Somewhat",
+                        style=ActionStyle.DEFAULT,
+                        data={"rating": 3, "feedback": "neutral"}
+                    ),
+                    ActionSubmit(
+                        id="helpful-no",
+                        title="Not really",
+                        style=ActionStyle.SECONDARY,
+                        data={"rating": 1, "feedback": "negative"}
+                    ),
+                ],
+                inputs=[
+                    InputText(
+                        id="comments",
+                        label="Any comments?",
+                        placeholder="Optional feedback...",
+                        is_multiline=True,
+                        max_length=500
+                    ),
+                    InputChoiceSet(
+                        id="use_case",
+                        label="What's your use case?",
+                        choices=[
+                            {"title": "Frontend development", "value": "frontend"},
+                            {"title": "Testing", "value": "testing"},
+                            {"title": "Learning", "value": "learning"},
+                            {"title": "Other", "value": "other"},
+                        ],
+                        is_required=False
+                    ),
+                ],
+                timeout_ms=60000,
+                fallback_text="Please provide feedback on this demo."
+            )
+        ))
+
+    # Phase 10: Mark all progress complete
+    if include_progress:
+        for i, label in enumerate(DEMO_PROGRESS_STEPS):
+            await asyncio.sleep(delay / 2)
+            yield format_sse_event(ProgressEvent(
+                step=i + 1,
+                total_steps=len(DEMO_PROGRESS_STEPS),
+                label=label,
+                status="completed"
+            ))
+
+    # Phase 11: Final chunk with finish_reason
+    final_chunk = ChatCompletionStreamResponse(
+        id=request_id,
+        created=created_at,
+        model=model,
+        choices=[
+            ChatCompletionStreamChoice(
+                index=0,
+                delta=ChatCompletionMessageDelta(),
+                finish_reason="stop",
+            )
+        ],
+    )
+    yield f"data: {final_chunk.model_dump_json()}\n\n"
+
+    # Phase 12: Done event
+    await asyncio.sleep(delay)
+    yield format_sse_event(DoneEvent(reason="stop"))
+
+    # Phase 13: OpenAI termination marker
+    yield "data: [DONE]\n\n"
+
+
+async def stream_minimal_demo(
+    content: str = "Hello from the simulator!",
+    delay_ms: int = 30,
+    model: str = "simulator-v1.0.0",
+) -> AsyncGenerator[str, None]:
+    """
+    Generate a minimal SSE sequence with just text and done.
+
+    Useful for simple testing without all event types.
+    Uses OpenAI-compatible format for text content.
+
+    Args:
+        content: Text content to stream
+        delay_ms: Delay between chunks
+        model: Model name for response metadata
+
+    Yields:
+        SSE-formatted strings
+    """
+    delay = delay_ms / 1000.0
+    request_id = f"chatcmpl-{uuid.uuid4().hex[:24]}"
+    created_at = int(time.time())
+    is_first_chunk = True
+
+    # Stream content word by word in OpenAI format
+    words = content.split(" ")
+    for word in words:
+        await asyncio.sleep(delay)
+        chunk = ChatCompletionStreamResponse(
+            id=request_id,
+            created=created_at,
+            model=model,
+            choices=[
+                ChatCompletionStreamChoice(
+                    index=0,
+                    delta=ChatCompletionMessageDelta(
+                        role="assistant" if is_first_chunk else None,
+                        content=word + " ",
+                    ),
+                    finish_reason=None,
+                )
+            ],
+        )
+        is_first_chunk = False
+        yield f"data: {chunk.model_dump_json()}\n\n"
+
+    # Final chunk with finish_reason
+    final_chunk = ChatCompletionStreamResponse(
+        id=request_id,
+        created=created_at,
+        model=model,
+        choices=[
+            ChatCompletionStreamChoice(
+                index=0,
+                delta=ChatCompletionMessageDelta(),
+                finish_reason="stop",
+            )
+        ],
+    )
+    yield f"data: {final_chunk.model_dump_json()}\n\n"
+
+    await asyncio.sleep(delay)
+    yield format_sse_event(DoneEvent(reason="stop"))
+    yield "data: [DONE]\n\n"
+
+
+async def stream_error_demo(
+    error_after_words: int = 10,
+    model: str = "simulator-v1.0.0",
+) -> AsyncGenerator[str, None]:
+    """
+    Generate an SSE sequence that ends with an error.
+
+    Useful for testing error handling in the frontend.
+    Uses OpenAI-compatible format for text content.
+
+    Args:
+        error_after_words: Number of words before error
+        model: Model name for response metadata
+
+    Yields:
+        SSE-formatted strings including an error event
+    """
+    from rem.api.routers.chat.sse_events import ErrorEvent
+
+    request_id = f"chatcmpl-{uuid.uuid4().hex[:24]}"
+    created_at = int(time.time())
+    is_first_chunk = True
+
+    content = "This is a demo that will encounter an error during streaming. Watch what happens when things go wrong..."
+    words = content.split(" ")
+
+    for i, word in enumerate(words[:error_after_words]):
+        await asyncio.sleep(0.03)
+        chunk = ChatCompletionStreamResponse(
+            id=request_id,
+            created=created_at,
+            model=model,
+            choices=[
+                ChatCompletionStreamChoice(
+                    index=0,
+                    delta=ChatCompletionMessageDelta(
+                        role="assistant" if is_first_chunk else None,
+                        content=word + " ",
+                    ),
+                    finish_reason=None,
+                )
+            ],
+        )
+        is_first_chunk = False
+        yield f"data: {chunk.model_dump_json()}\n\n"
+
+    await asyncio.sleep(0.1)
+    yield format_sse_event(ErrorEvent(
+        code="simulated_error",
+        message="This is a simulated error for testing purposes",
+        details={"words_sent": error_after_words, "demo": True},
+        recoverable=True
+    ))
+
+    yield format_sse_event(DoneEvent(reason="error"))
+    yield "data: [DONE]\n\n"
rem/agentic/context.py CHANGED
@@ -72,42 +72,48 @@ class AgentContext(BaseModel):
     def get_user_id_or_default(
         user_id: str | None,
         source: str = "context",
-        default: str = "default",
-    ) -> str:
+        default: str | None = None,
+    ) -> str | None:
         """
-        Get user_id or fallback to default with logging.
+        Get user_id or return None for anonymous access.
 
-        Centralized helper for consistent user_id fallback behavior across
-        API endpoints, MCP tools, CLI commands, and services.
+        User ID convention:
+        - user_id is a deterministic UUID5 hash of the user's email address
+        - Use rem.utils.user_id.email_to_user_id(email) to generate
+        - The JWT's `sub` claim is NOT directly used as user_id
+        - Authentication middleware extracts email from JWT and hashes it
+
+        When user_id is None, queries return data with user_id IS NULL
+        (shared/public data). This is intentional - no fake user IDs.
 
         Args:
-            user_id: User identifier (may be None)
+            user_id: User identifier (UUID5 hash of email, may be None for anonymous)
             source: Source of the call (for logging clarity)
-            default: Default value to use (default: "default")
+            default: Explicit default (only for testing, not auto-generated)
 
         Returns:
-            user_id if provided, otherwise default
+            user_id if provided, explicit default if provided, otherwise None
 
         Example:
-            # In MCP tool
-            user_id = AgentContext.get_user_id_or_default(
-                user_id, source="ask_rem_agent"
-            )
+            # Generate user_id from email (done by auth middleware)
+            from rem.utils.user_id import email_to_user_id
+            user_id = email_to_user_id("alice@example.com")
+            # -> "2c5ea4c0-4067-5fef-942d-0a20124e06d8"
 
-            # In API endpoint
+            # In MCP tool - anonymous user sees shared data
             user_id = AgentContext.get_user_id_or_default(
-                temp_context.user_id, source="chat_completions"
-            )
-
-            # In CLI command
-            user_id = AgentContext.get_user_id_or_default(
-                args.user_id, source="rem ask"
+                user_id, source="ask_rem_agent"
             )
+            # Returns None if not authenticated -> queries WHERE user_id IS NULL
        """
-        if user_id is None:
-            logger.debug(f"No user_id provided from {source}, using '{default}'")
+        if user_id is not None:
+            return user_id
+        if default is not None:
+            logger.debug(f"Using explicit default user_id '{default}' from {source}")
            return default
-        return user_id
+        # No fake user IDs - return None for anonymous/unauthenticated
+        logger.debug(f"No user_id from {source}, using None (anonymous/shared data)")
+        return None
 
     @classmethod
     def from_headers(cls, headers: dict[str, str]) -> "AgentContext":
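The deterministic email-to-UUID5 convention described in the new docstring can be illustrated with the standard library. This is a sketch only: the namespace and email normalization below are assumptions, so the output may not match the `alice@example.com` example above, and the real `rem.utils.user_id.email_to_user_id` may differ.

```python
# Sketch of a deterministic email -> user_id mapping via UUID5.
# ASSUMPTION: the namespace (NAMESPACE_DNS) and lowercasing are illustrative;
# rem.utils.user_id may use a project-specific namespace and normalization.
import uuid

def email_to_user_id(email: str) -> str:
    # uuid5 hashes namespace + name with SHA-1, so the same email always
    # yields the same user_id across processes and services - no lookup table.
    return str(uuid.uuid5(uuid.NAMESPACE_DNS, email.strip().lower()))

# Same logical email, same ID on every call.
assert email_to_user_id("alice@example.com") == email_to_user_id("ALICE@EXAMPLE.COM")
```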