remdb 0.2.6__py3-none-any.whl → 0.3.103__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of remdb might be problematic; review the file changes listed below for details.

Files changed (82)
  1. rem/__init__.py +129 -2
  2. rem/agentic/README.md +76 -0
  3. rem/agentic/__init__.py +15 -0
  4. rem/agentic/agents/__init__.py +16 -2
  5. rem/agentic/agents/sse_simulator.py +500 -0
  6. rem/agentic/context.py +7 -5
  7. rem/agentic/llm_provider_models.py +301 -0
  8. rem/agentic/providers/phoenix.py +32 -43
  9. rem/agentic/providers/pydantic_ai.py +84 -10
  10. rem/api/README.md +238 -1
  11. rem/api/deps.py +255 -0
  12. rem/api/main.py +70 -22
  13. rem/api/mcp_router/server.py +8 -1
  14. rem/api/mcp_router/tools.py +80 -0
  15. rem/api/middleware/tracking.py +172 -0
  16. rem/api/routers/admin.py +277 -0
  17. rem/api/routers/auth.py +124 -0
  18. rem/api/routers/chat/completions.py +123 -14
  19. rem/api/routers/chat/models.py +7 -3
  20. rem/api/routers/chat/sse_events.py +526 -0
  21. rem/api/routers/chat/streaming.py +468 -45
  22. rem/api/routers/dev.py +81 -0
  23. rem/api/routers/feedback.py +455 -0
  24. rem/api/routers/messages.py +473 -0
  25. rem/api/routers/models.py +78 -0
  26. rem/api/routers/shared_sessions.py +406 -0
  27. rem/auth/middleware.py +126 -27
  28. rem/cli/commands/ask.py +15 -11
  29. rem/cli/commands/configure.py +169 -94
  30. rem/cli/commands/db.py +53 -7
  31. rem/cli/commands/experiments.py +278 -96
  32. rem/cli/commands/process.py +8 -7
  33. rem/cli/commands/scaffold.py +47 -0
  34. rem/cli/commands/schema.py +9 -9
  35. rem/cli/main.py +10 -0
  36. rem/config.py +2 -2
  37. rem/models/core/core_model.py +7 -1
  38. rem/models/entities/__init__.py +21 -0
  39. rem/models/entities/domain_resource.py +38 -0
  40. rem/models/entities/feedback.py +123 -0
  41. rem/models/entities/message.py +30 -1
  42. rem/models/entities/session.py +83 -0
  43. rem/models/entities/shared_session.py +206 -0
  44. rem/models/entities/user.py +10 -3
  45. rem/registry.py +367 -0
  46. rem/schemas/agents/rem.yaml +7 -3
  47. rem/services/content/providers.py +94 -140
  48. rem/services/content/service.py +85 -16
  49. rem/services/dreaming/affinity_service.py +2 -16
  50. rem/services/dreaming/moment_service.py +2 -15
  51. rem/services/embeddings/api.py +20 -13
  52. rem/services/phoenix/EXPERIMENT_DESIGN.md +3 -3
  53. rem/services/phoenix/client.py +252 -19
  54. rem/services/postgres/README.md +29 -10
  55. rem/services/postgres/repository.py +132 -0
  56. rem/services/postgres/schema_generator.py +86 -5
  57. rem/services/rate_limit.py +113 -0
  58. rem/services/rem/README.md +14 -0
  59. rem/services/session/compression.py +17 -1
  60. rem/services/user_service.py +98 -0
  61. rem/settings.py +115 -17
  62. rem/sql/background_indexes.sql +10 -0
  63. rem/sql/migrations/001_install.sql +152 -2
  64. rem/sql/migrations/002_install_models.sql +580 -231
  65. rem/sql/migrations/003_seed_default_user.sql +48 -0
  66. rem/utils/constants.py +97 -0
  67. rem/utils/date_utils.py +228 -0
  68. rem/utils/embeddings.py +17 -4
  69. rem/utils/files.py +167 -0
  70. rem/utils/mime_types.py +158 -0
  71. rem/utils/model_helpers.py +156 -1
  72. rem/utils/schema_loader.py +273 -14
  73. rem/utils/sql_types.py +3 -1
  74. rem/utils/vision.py +9 -14
  75. rem/workers/README.md +14 -14
  76. rem/workers/db_maintainer.py +74 -0
  77. {remdb-0.2.6.dist-info → remdb-0.3.103.dist-info}/METADATA +486 -132
  78. {remdb-0.2.6.dist-info → remdb-0.3.103.dist-info}/RECORD +80 -57
  79. {remdb-0.2.6.dist-info → remdb-0.3.103.dist-info}/WHEEL +1 -1
  80. rem/sql/002_install_models.sql +0 -1068
  81. rem/sql/install_models.sql +0 -1038
  82. {remdb-0.2.6.dist-info → remdb-0.3.103.dist-info}/entry_points.txt +0 -0
@@ -70,7 +70,7 @@ from ....agentic.providers.pydantic_ai import create_agent
70
70
  from ....services.audio.transcriber import AudioTranscriber
71
71
  from ....services.session import SessionMessageStore, reload_session
72
72
  from ....settings import settings
73
- from ....utils.schema_loader import load_agent_schema
73
+ from ....utils.schema_loader import load_agent_schema, load_agent_schema_async
74
74
  from .json_utils import extract_json_resilient
75
75
  from .models import (
76
76
  ChatCompletionChoice,
@@ -79,9 +79,9 @@ from .models import (
79
79
  ChatCompletionUsage,
80
80
  ChatMessage,
81
81
  )
82
- from .streaming import stream_openai_response
82
+ from .streaming import stream_openai_response, stream_simulator_response
83
83
 
84
- router = APIRouter(prefix="/v1", tags=["chat"])
84
+ router = APIRouter(prefix="/api/v1", tags=["chat"])
85
85
 
86
86
  # Default agent schema file
87
87
  DEFAULT_AGENT_SCHEMA = "rem"
@@ -133,9 +133,114 @@ async def chat_completions(body: ChatCompletionRequest, request: Request):
133
133
  temp_context = AgentContext.from_headers(dict(request.headers))
134
134
  schema_name = temp_context.agent_schema_uri or DEFAULT_AGENT_SCHEMA
135
135
 
136
+ # Resolve model: use body.model if provided, otherwise settings default
137
+ if body.model is None:
138
+ body.model = settings.llm.default_model
139
+ logger.debug(f"No model specified, using default: {body.model}")
140
+
141
+ # Special handling for simulator schema - no LLM, just generates demo SSE events
142
+ # Check BEFORE loading schema since simulator doesn't need a schema file
143
+ # Still builds full context and saves messages like a real agent
144
+ if schema_name == "simulator":
145
+ logger.info("Using SSE simulator (no LLM)")
146
+
147
+ # Build context just like real agents (loads session history, user context)
148
+ new_messages = [msg.model_dump() for msg in body.messages]
149
+ context, messages = await ContextBuilder.build_from_headers(
150
+ headers=dict(request.headers),
151
+ new_messages=new_messages,
152
+ )
153
+
154
+ # Get the last user message as prompt
155
+ prompt = body.messages[-1].content if body.messages else "demo"
156
+ request_id = f"sim-{uuid.uuid4().hex[:24]}"
157
+
158
+ # Generate message IDs upfront for correlation
159
+ user_message_id = str(uuid.uuid4())
160
+ assistant_message_id = str(uuid.uuid4())
161
+
162
+ # Simulated assistant response content (for persistence)
163
+ simulated_content = (
164
+ f"[SSE Simulator Response]\n\n"
165
+ f"This is a simulated response demonstrating all SSE event types:\n"
166
+ f"- reasoning events (model thinking)\n"
167
+ f"- text_delta events (streamed content)\n"
168
+ f"- progress events (multi-step operations)\n"
169
+ f"- tool_call events (function invocations)\n"
170
+ f"- action_request events (UI solicitation)\n"
171
+ f"- metadata events (confidence, sources, message IDs)\n\n"
172
+ f"Original prompt: {prompt[:100]}{'...' if len(prompt) > 100 else ''}"
173
+ )
174
+
175
+ # Save messages to database (if session_id and postgres enabled)
176
+ if settings.postgres.enabled and context.session_id:
177
+ user_message = {
178
+ "id": user_message_id,
179
+ "role": "user",
180
+ "content": prompt,
181
+ "timestamp": datetime.utcnow().isoformat(),
182
+ }
183
+ assistant_message = {
184
+ "id": assistant_message_id,
185
+ "role": "assistant",
186
+ "content": simulated_content,
187
+ "timestamp": datetime.utcnow().isoformat(),
188
+ }
189
+
190
+ try:
191
+ store = SessionMessageStore(user_id=context.user_id or settings.test.effective_user_id)
192
+ await store.store_session_messages(
193
+ session_id=context.session_id,
194
+ messages=[user_message, assistant_message],
195
+ user_id=context.user_id,
196
+ compress=True,
197
+ )
198
+ logger.info(f"Saved simulator conversation to session {context.session_id}")
199
+ except Exception as e:
200
+ # Log error but don't fail the request - session storage is non-critical
201
+ logger.error(f"Failed to save session messages: {e}", exc_info=True)
202
+
203
+ if body.stream:
204
+ return StreamingResponse(
205
+ stream_simulator_response(
206
+ prompt=prompt,
207
+ model="simulator-v1.0.0",
208
+ # Pass message correlation IDs
209
+ message_id=assistant_message_id,
210
+ in_reply_to=user_message_id,
211
+ session_id=context.session_id,
212
+ ),
213
+ media_type="text/event-stream",
214
+ headers={"Cache-Control": "no-cache", "Connection": "keep-alive"},
215
+ )
216
+ else:
217
+ # Non-streaming simulator returns simple JSON
218
+ return ChatCompletionResponse(
219
+ id=request_id,
220
+ created=int(time.time()),
221
+ model="simulator-v1.0.0",
222
+ choices=[
223
+ ChatCompletionChoice(
224
+ index=0,
225
+ message=ChatMessage(
226
+ role="assistant",
227
+ content=simulated_content,
228
+ ),
229
+ finish_reason="stop",
230
+ )
231
+ ],
232
+ usage=ChatCompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0),
233
+ )
234
+
136
235
  # Load schema using centralized utility
236
+ # Enable database fallback to load dynamic agents stored in schemas table
237
+ # Use async version since we're in an async context (FastAPI endpoint)
238
+ user_id = temp_context.user_id or settings.test.effective_user_id
137
239
  try:
138
- agent_schema = load_agent_schema(schema_name)
240
+ agent_schema = await load_agent_schema_async(
241
+ schema_name,
242
+ user_id=user_id,
243
+ )
139
244
  except FileNotFoundError:
140
245
  # Fallback to default if specified schema not found
141
246
  logger.warning(f"Schema '{schema_name}' not found, falling back to '{DEFAULT_AGENT_SCHEMA}'")
@@ -250,17 +355,21 @@ async def chat_completions(body: ChatCompletionRequest, request: Request):
250
355
  "timestamp": datetime.utcnow().isoformat(),
251
356
  }
252
357
 
253
- # Store messages with compression
254
- store = SessionMessageStore(user_id=context.user_id or "default")
255
-
256
- await store.store_session_messages(
257
- session_id=context.session_id,
258
- messages=[user_message, assistant_message],
259
- user_id=context.user_id,
260
- compress=True,
261
- )
358
+ try:
359
+ # Store messages with compression
360
+ store = SessionMessageStore(user_id=context.user_id or settings.test.effective_user_id)
361
+
362
+ await store.store_session_messages(
363
+ session_id=context.session_id,
364
+ messages=[user_message, assistant_message],
365
+ user_id=context.user_id,
366
+ compress=True,
367
+ )
262
368
 
263
- logger.info(f"Saved conversation to session {context.session_id}")
369
+ logger.info(f"Saved conversation to session {context.session_id}")
370
+ except Exception as e:
371
+ # Log error but don't fail the request - session storage is non-critical
372
+ logger.error(f"Failed to save session messages: {e}", exc_info=True)
264
373
 
265
374
  return ChatCompletionResponse(
266
375
  id=request_id,
@@ -12,6 +12,8 @@ from typing import Literal
12
12
 
13
13
  from pydantic import BaseModel, Field
14
14
 
15
+ from rem.settings import settings
16
+
15
17
 
16
18
  # Request models
17
19
  class ChatMessage(BaseModel):
@@ -52,9 +54,11 @@ class ChatCompletionRequest(BaseModel):
52
54
  Note: Model is specified in body.model (standard OpenAI field), not headers.
53
55
  """
54
56
 
55
- model: str = Field(
56
- default="anthropic:claude-sonnet-4-5-20250929",
57
- description="Model to use (standard OpenAI field)",
57
+ # TODO: default should come from settings.llm.default_model at request time
58
+ # Using None and resolving in endpoint to avoid import-time settings evaluation
59
+ model: str | None = Field(
60
+ default=None,
61
+ description="Model to use. Defaults to LLM__DEFAULT_MODEL from settings.",
58
62
  )
59
63
  messages: list[ChatMessage] = Field(description="Chat conversation history")
60
64
  temperature: float | None = Field(default=None, ge=0, le=2)