remdb 0.3.226__py3-none-any.whl → 0.3.245__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of remdb might be problematic.
- rem/agentic/README.md +22 -248
- rem/agentic/context.py +13 -2
- rem/agentic/context_builder.py +39 -33
- rem/agentic/providers/pydantic_ai.py +67 -50
- rem/api/mcp_router/resources.py +223 -0
- rem/api/mcp_router/tools.py +25 -9
- rem/api/routers/auth.py +112 -9
- rem/api/routers/chat/child_streaming.py +394 -0
- rem/api/routers/chat/streaming.py +166 -357
- rem/api/routers/chat/streaming_utils.py +327 -0
- rem/api/routers/query.py +5 -14
- rem/cli/commands/ask.py +144 -33
- rem/cli/commands/process.py +9 -1
- rem/cli/commands/query.py +109 -0
- rem/cli/commands/session.py +117 -0
- rem/cli/main.py +2 -0
- rem/models/entities/session.py +1 -0
- rem/services/postgres/repository.py +7 -17
- rem/services/rem/service.py +47 -0
- rem/services/session/compression.py +7 -3
- rem/services/session/pydantic_messages.py +45 -11
- rem/services/session/reload.py +2 -1
- rem/settings.py +43 -0
- rem/sql/migrations/004_cache_system.sql +3 -1
- rem/utils/schema_loader.py +99 -99
- {remdb-0.3.226.dist-info → remdb-0.3.245.dist-info}/METADATA +2 -2
- {remdb-0.3.226.dist-info → remdb-0.3.245.dist-info}/RECORD +29 -26
- {remdb-0.3.226.dist-info → remdb-0.3.245.dist-info}/WHEEL +0 -0
- {remdb-0.3.226.dist-info → remdb-0.3.245.dist-info}/entry_points.txt +0 -0
@@ -357,6 +357,9 @@ def _convert_properties_to_prompt(properties: dict[str, Any]) -> str:
     definition into natural language guidance that informs the agent
     about the expected response structure without forcing JSON output.
 
+    IMPORTANT: The 'answer' field is the OUTPUT to the user. All other
+    fields are INTERNAL tracking that should NOT appear in the output.
+
     Args:
         properties: JSON Schema properties dict
 
@@ -368,45 +371,59 @@ def _convert_properties_to_prompt(properties: dict[str, Any]) -> str:
             "answer": {"type": "string", "description": "The answer"},
             "confidence": {"type": "number", "description": "Confidence 0-1"}
         }
-        # Returns
-        # "## Response Structure\n\nYour response should include:\n- **answer**: The answer\n..."
+        # Returns guidance that only answer should be output
     """
     if not properties:
         return ""
 
-
-
-    for
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+    # Separate answer (output) from other fields (internal tracking)
+    answer_field = properties.get("answer")
+    internal_fields = {k: v for k, v in properties.items() if k != "answer"}
+
+    lines = ["## Internal Thinking Structure (DO NOT output these labels)"]
+    lines.append("")
+    lines.append("Use this structure to organize your thinking, but ONLY output the answer content:")
+    lines.append("")
+
+    # If there's an answer field, emphasize it's the ONLY output
+    if answer_field:
+        answer_desc = answer_field.get("description", "Your response")
+        lines.append(f"**OUTPUT (what the user sees):** {answer_desc}")
+        lines.append("")
+
+    # Document internal fields for tracking/thinking
+    if internal_fields:
+        lines.append("**INTERNAL (for your tracking only - do NOT include in output):**")
+        for field_name, field_def in internal_fields.items():
+            field_type = field_def.get("type", "any")
+            description = field_def.get("description", "")
+
+            # Format based on type
+            if field_type == "array":
+                type_hint = "list"
+            elif field_type == "number":
+                type_hint = "number"
+                if "minimum" in field_def or "maximum" in field_def:
+                    min_val = field_def.get("minimum", "")
+                    max_val = field_def.get("maximum", "")
+                    if min_val != "" and max_val != "":
+                        type_hint = f"number ({min_val}-{max_val})"
+            elif field_type == "boolean":
+                type_hint = "yes/no"
+            else:
+                type_hint = field_type
 
-
-
-
-
-
-            field_line += f": {description}"
+            field_line = f"- {field_name}"
+            if type_hint and type_hint != "string":
+                field_line += f" ({type_hint})"
+            if description:
+                field_line += f": {description}"
 
-
+            lines.append(field_line)
 
     lines.append("")
-    lines.append("
+    lines.append("⚠️ CRITICAL: Your response must be ONLY the conversational answer text.")
+    lines.append("Do NOT output field names like 'answer:' or 'diverge_output:' - just the response itself.")
 
     return "\n".join(lines)
 
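(Illustration, not part of the published diff.) Given the rewritten body above, calling the helper on a two-field schema would yield guidance roughly like the comments below; the sample schema and rendered text are inferred from the added code, not captured from remdb.

    # Hypothetical call against the new _convert_properties_to_prompt body.
    properties = {
        "answer": {"type": "string", "description": "The answer"},
        "confidence": {"type": "number", "description": "Confidence 0-1"},
    }
    guidance = _convert_properties_to_prompt(properties)
    # Expected shape of `guidance` (abridged):
    #   ## Internal Thinking Structure (DO NOT output these labels)
    #   **OUTPUT (what the user sees):** The answer
    #   **INTERNAL (for your tracking only - do NOT include in output):**
    #   - confidence (number): Confidence 0-1
    #   ⚠️ CRITICAL: Your response must be ONLY the conversational answer text.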
@@ -664,26 +681,26 @@ async def create_agent(
 
     set_agent_resource_attributes(agent_schema=agent_schema)
 
-    # Extract schema metadata for search_rem tool description suffix
-    # This allows entity schemas to add context-specific notes to the search_rem tool
-    search_rem_suffix = None
-    if metadata:
-        # Check for default_search_table in metadata (set by entity schemas)
-        extra = agent_schema.get("json_schema_extra", {}) if agent_schema else {}
-        default_table = extra.get("default_search_table")
-        has_embeddings = extra.get("has_embeddings", False)
-
-        if default_table:
-            # Build description suffix for search_rem
-            search_rem_suffix = f"\n\nFor this schema, use `search_rem` to query `{default_table}`. "
-            if has_embeddings:
-                search_rem_suffix += f"SEARCH works well on {default_table} (has embeddings). "
-            search_rem_suffix += f"Example: `SEARCH \"your query\" FROM {default_table} LIMIT 10`"
-
     # Add tools from MCP server (in-process, no subprocess)
     # Track loaded MCP servers for resource resolution
     loaded_mcp_server = None
 
+    # Build map of tool_name → schema description from agent schema tools section
+    # This allows agent-specific tool guidance to override/augment MCP tool descriptions
+    schema_tool_descriptions: dict[str, str] = {}
+    tool_configs = metadata.tools if metadata and hasattr(metadata, 'tools') else []
+    for tool_config in tool_configs:
+        if hasattr(tool_config, 'name'):
+            t_name = tool_config.name
+            t_desc = tool_config.description or ""
+        else:
+            t_name = tool_config.get("name", "")
+            t_desc = tool_config.get("description", "")
+        # Skip resource URIs (handled separately below)
+        if t_name and "://" not in t_name and t_desc:
+            schema_tool_descriptions[t_name] = t_desc
+            logger.debug(f"Schema tool description for '{t_name}': {len(t_desc)} chars")
+
     for server_config in mcp_server_configs:
         server_type = server_config.get("type")
         server_id = server_config.get("id", "mcp-server")
@@ -708,8 +725,8 @@ async def create_agent(
             mcp_tools_dict = await mcp_server.get_tools()
 
             for tool_name, tool_func in mcp_tools_dict.items():
-                #
-                tool_suffix =
+                # Get schema description suffix if agent schema defines one for this tool
+                tool_suffix = schema_tool_descriptions.get(tool_name)
 
                 wrapped_tool = create_mcp_tool_wrapper(
                     tool_name,
@@ -718,7 +735,7 @@ async def create_agent(
                     description_suffix=tool_suffix,
                 )
                 tools.append(wrapped_tool)
-                logger.debug(f"Loaded MCP tool: {tool_name}" + (" (with schema
+                logger.debug(f"Loaded MCP tool: {tool_name}" + (" (with schema desc)" if tool_suffix else ""))
 
             logger.info(f"Loaded {len(mcp_tools_dict)} tools from MCP server: {server_id} (in-process)")
 
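(Illustration, not part of the published diff.) The new schema_tool_descriptions lookup lets an agent schema attach per-tool guidance by name; the exact agent-schema format is not shown in this diff, so the entry shapes below are assumptions.

    # Assumed shape of the metadata.tools entries consumed by the loop above.
    tool_configs = [
        {"name": "search_rem", "description": "Prefer SEARCH over LOOKUP for open-ended questions."},
        {"name": "rem://agents/evaluator", "description": "Resource URI - skipped by the '://' filter."},
    ]
    # Only the first entry lands in schema_tool_descriptions; its text is passed to
    # create_mcp_tool_wrapper(..., description_suffix=...) when the search_rem tool is wrapped.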
rem/api/mcp_router/resources.py
CHANGED
@@ -542,6 +542,227 @@ def register_status_resources(mcp: FastMCP):
     """
 
 
+def register_session_resources(mcp: FastMCP):
+    """
+    Register session resources for loading conversation history.
+
+    Args:
+        mcp: FastMCP server instance
+    """
+
+    @mcp.resource("rem://sessions/{session_id}")
+    async def get_session_messages(session_id: str) -> str:
+        """
+        Load a conversation session by ID.
+
+        Returns the full message history including user messages, assistant responses,
+        and tool calls. Useful for evaluators and analysis agents.
+
+        Args:
+            session_id: Session UUID or identifier
+
+        Returns:
+            Formatted conversation history as markdown string with:
+            - Message type (user/assistant/tool)
+            - Content
+            - Timestamps
+            - Tool call details (if any)
+        """
+        from ...services.postgres import get_postgres_service
+
+        pg = get_postgres_service()
+        await pg.connect()
+
+        try:
+            # Query messages for session
+            query = """
+                SELECT id, message_type, content, metadata, created_at
+                FROM messages
+                WHERE session_id = $1
+                ORDER BY created_at ASC
+            """
+            messages = await pg.fetch(query, session_id)
+
+            if not messages:
+                return f"# Session Not Found\n\nNo messages found for session_id: {session_id}"
+
+            # Format output
+            output = [f"# Session: {session_id}\n"]
+            output.append(f"**Total messages:** {len(messages)}\n")
+
+            for i, msg in enumerate(messages, 1):
+                msg_type = msg['message_type']
+                content = msg['content'] or "(empty)"
+                created = msg['created_at']
+                metadata = msg.get('metadata') or {}
+
+                # Format based on message type
+                if msg_type == 'user':
+                    output.append(f"\n## [{i}] USER ({created})")
+                    output.append(f"```\n{content[:1000]}{'...' if len(content) > 1000 else ''}\n```")
+                elif msg_type == 'assistant':
+                    output.append(f"\n## [{i}] ASSISTANT ({created})")
+                    output.append(f"```\n{content[:1000]}{'...' if len(content) > 1000 else ''}\n```")
+                elif msg_type == 'tool':
+                    tool_name = metadata.get('tool_name', 'unknown')
+                    output.append(f"\n## [{i}] TOOL: {tool_name} ({created})")
+                    # Truncate tool results more aggressively
+                    output.append(f"```json\n{content[:500]}{'...' if len(content) > 500 else ''}\n```")
+                else:
+                    output.append(f"\n## [{i}] {msg_type.upper()} ({created})")
+                    output.append(f"```\n{content[:500]}{'...' if len(content) > 500 else ''}\n```")
+
+            return "\n".join(output)
+
+        finally:
+            await pg.disconnect()
+
+    @mcp.resource("rem://sessions")
+    async def list_recent_sessions() -> str:
+        """
+        List recent sessions with basic info.
+
+        Returns the most recent 20 sessions with:
+        - Session ID
+        - First user message (preview)
+        - Message count
+        - Timestamp
+        """
+        from ...services.postgres import get_postgres_service
+
+        pg = get_postgres_service()
+        await pg.connect()
+
+        try:
+            # Query recent sessions
+            query = """
+                SELECT
+                    session_id,
+                    MIN(created_at) as started_at,
+                    COUNT(*) as message_count,
+                    MIN(CASE WHEN message_type = 'user' THEN content END) as first_message
+                FROM messages
+                WHERE session_id IS NOT NULL
+                GROUP BY session_id
+                ORDER BY MIN(created_at) DESC
+                LIMIT 20
+            """
+            sessions = await pg.fetch(query)
+
+            if not sessions:
+                return "# Recent Sessions\n\nNo sessions found."
+
+            output = ["# Recent Sessions\n"]
+            output.append(f"Showing {len(sessions)} most recent sessions:\n")
+
+            for session in sessions:
+                session_id = session['session_id']
+                started = session['started_at']
+                count = session['message_count']
+                first_msg = session['first_message'] or "(no user message)"
+                preview = first_msg[:80] + "..." if len(first_msg) > 80 else first_msg
+
+                output.append(f"\n## {session_id}")
+                output.append(f"- **Started:** {started}")
+                output.append(f"- **Messages:** {count}")
+                output.append(f"- **First message:** {preview}")
+                output.append(f"- **Load:** `rem://sessions/{session_id}`")
+
+            return "\n".join(output)
+
+        finally:
+            await pg.disconnect()
+
+
+def register_user_resources(mcp: FastMCP):
+    """
+    Register user profile resources for on-demand profile loading.
+
+    Args:
+        mcp: FastMCP server instance
+    """
+
+    @mcp.resource("user://profile/{user_id}")
+    async def get_user_profile(user_id: str) -> str:
+        """
+        Load a user's profile by ID.
+
+        Returns the user's profile information including:
+        - Email and name
+        - Summary (AI-generated profile summary)
+        - Interests and preferred topics
+        - Activity level
+
+        This resource is protected - each user can only access their own profile.
+        The user_id should match the authenticated user's ID from the JWT token.
+
+        Args:
+            user_id: User UUID from authentication
+
+        Returns:
+            Formatted user profile as markdown string, or error if not found
+        """
+        from ...services.postgres import get_postgres_service
+        from ...services.postgres.repository import Repository
+        from ...models.entities.user import User
+
+        pg = get_postgres_service()
+        await pg.connect()
+
+        try:
+            user_repo = Repository(User, "users", db=pg)
+            # Look up user by ID (user_id from JWT is the primary key)
+            user = await user_repo.get_by_id(user_id, tenant_id=None)
+
+            if not user:
+                return f"# User Profile Not Found\n\nNo user found with ID: {user_id}"
+
+            # Build profile output
+            output = [f"# User Profile: {user.name or user.email or 'Unknown'}"]
+            output.append("")
+
+            if user.email:
+                output.append(f"**Email:** {user.email}")
+
+            if user.role:
+                output.append(f"**Role:** {user.role}")
+
+            if user.tier:
+                output.append(f"**Tier:** {user.tier.value if hasattr(user.tier, 'value') else user.tier}")
+
+            if user.summary:
+                output.append(f"\n## Summary\n{user.summary}")
+
+            if user.interests:
+                output.append(f"\n## Interests\n- " + "\n- ".join(user.interests[:10]))
+
+            if user.preferred_topics:
+                output.append(f"\n## Preferred Topics\n- " + "\n- ".join(user.preferred_topics[:10]))
+
+            if user.activity_level:
+                output.append(f"\n**Activity Level:** {user.activity_level}")
+
+            if user.last_active_at:
+                output.append(f"**Last Active:** {user.last_active_at}")
+
+            # Add metadata if present (but redact sensitive fields)
+            if user.metadata:
+                safe_metadata = {k: v for k, v in user.metadata.items()
+                                 if k not in ('login_code', 'password', 'token', 'secret')}
+                if safe_metadata:
+                    output.append(f"\n## Additional Info")
+                    for key, value in list(safe_metadata.items())[:5]:
+                        output.append(f"- **{key}:** {value}")
+
+            return "\n".join(output)
+
+        except Exception as e:
+            return f"# Error Loading Profile\n\nFailed to load user profile: {e}"
+
+        finally:
+            await pg.disconnect()
+
+
 # Resource dispatcher for read_resource tool
 async def load_resource(uri: str) -> dict | str:
     """
@@ -571,6 +792,8 @@ async def load_resource(uri: str) -> dict | str:
     register_agent_resources(mcp)
     register_file_resources(mcp)
     register_status_resources(mcp)
+    register_session_resources(mcp)
+    register_user_resources(mcp)
 
     # 1. Try exact match in regular resources
     resources = await mcp.get_resources()
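(Usage sketch, not part of the published diff.) Because the new resources are registered inside the existing load_resource dispatcher, an in-process caller can fetch them by URI; the UUID below is a placeholder.

    listing = await load_resource("rem://sessions")       # markdown list of the 20 most recent sessions
    history = await load_resource("rem://sessions/123e4567-e89b-12d3-a456-426614174000")
    profile = await load_resource("user://profile/123e4567-e89b-12d3-a456-426614174000")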
rem/api/mcp_router/tools.py
CHANGED
@@ -1414,17 +1414,12 @@ async def ask_agent(
             if Agent.is_model_request_node(node):
                 async with node.stream(agent_run.ctx) as request_stream:
                     async for event in request_stream:
-                        # Proxy part starts
+                        # Proxy part starts (text content only - tool calls handled in is_call_tools_node)
                         if isinstance(event, PartStartEvent):
                             from pydantic_ai.messages import ToolCallPart, TextPart
                             if isinstance(event.part, ToolCallPart):
-                                #
-
-                                    "type": "child_tool_start",
-                                    "agent_name": agent_name,
-                                    "tool_name": event.part.tool_name,
-                                    "arguments": event.part.args if hasattr(event.part, 'args') else None,
-                                })
+                                # Track tool call for later (args are incomplete at PartStartEvent)
+                                # Full args come via FunctionToolCallEvent in is_call_tools_node
                                 child_tool_calls.append({
                                     "tool_name": event.part.tool_name,
                                     "index": event.index,
@@ -1454,7 +1449,28 @@ async def ask_agent(
             elif Agent.is_call_tools_node(node):
                 async with node.stream(agent_run.ctx) as tools_stream:
                     async for tool_event in tools_stream:
-
+                        # FunctionToolCallEvent fires when tool call is parsed
+                        # with complete arguments (before execution)
+                        if isinstance(tool_event, FunctionToolCallEvent):
+                            # Get full arguments from completed tool call
+                            tool_args = None
+                            if hasattr(tool_event, 'part') and hasattr(tool_event.part, 'args'):
+                                raw_args = tool_event.part.args
+                                if isinstance(raw_args, str):
+                                    try:
+                                        tool_args = json.loads(raw_args)
+                                    except json.JSONDecodeError:
+                                        tool_args = {"raw": raw_args}
+                                elif isinstance(raw_args, dict):
+                                    tool_args = raw_args
+                            # Push tool start with full arguments
+                            await push_event.put({
+                                "type": "child_tool_start",
+                                "agent_name": agent_name,
+                                "tool_name": tool_event.part.tool_name if hasattr(tool_event, 'part') else "unknown",
+                                "arguments": tool_args,
+                            })
+                        elif isinstance(tool_event, FunctionToolResultEvent):
                             result_content = tool_event.result.content if hasattr(tool_event.result, 'content') else tool_event.result
                             # Push tool result to parent
                             await push_event.put({
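(Consumer-side sketch, not part of the published diff.) Parent streams now receive child_tool_start events with fully parsed arguments. Assuming push_event is an asyncio.Queue, as the await push_event.put(...) calls suggest, a consumer loop could look like this; the result event's type name is not shown in this diff, so it is handled generically below.

    import asyncio

    async def drain_child_events(push_event: asyncio.Queue) -> None:
        # Pull events pushed by ask_agent and render child-agent tool activity.
        while True:
            event = await push_event.get()
            if event.get("type") == "child_tool_start":
                print(f"[{event['agent_name']}] calling {event['tool_name']} with {event['arguments']}")
            else:
                # Tool results and other event types (names not shown in this diff).
                print(f"received event: {event.get('type')}")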
rem/api/routers/auth.py
CHANGED
@@ -3,11 +3,12 @@ Authentication Router.
 
 Supports multiple authentication methods:
 1. Email (passwordless): POST /api/auth/email/send-code, POST /api/auth/email/verify
-2.
+2. Pre-approved codes: POST /api/auth/email/verify (with pre-approved code, no send-code needed)
+3. OAuth (Google, Microsoft): GET /api/auth/{provider}/login, GET /api/auth/{provider}/callback
 
 Endpoints:
 - POST /api/auth/email/send-code - Send login code to email
-- POST /api/auth/email/verify - Verify code and create session
+- POST /api/auth/email/verify - Verify code and create session (supports pre-approved codes)
 - GET /api/auth/{provider}/login - Initiate OAuth flow
 - GET /api/auth/{provider}/callback - OAuth callback
 - POST /api/auth/logout - Clear session
@@ -15,9 +16,39 @@ Endpoints:
 
 Supported providers:
 - email: Passwordless email login
+- preapproved: Pre-approved codes (bypass email, set via AUTH__PREAPPROVED_CODES)
 - google: Google OAuth 2.0 / OIDC
 - microsoft: Microsoft Entra ID OIDC
 
+=============================================================================
+Pre-Approved Code Authentication
+=============================================================================
+
+Pre-approved codes allow login without email verification. Useful for:
+- Demo accounts
+- Testing
+- Beta access codes
+- Admin provisioning
+
+Configuration:
+    AUTH__PREAPPROVED_CODES=A12345,A67890,B11111,B22222
+
+Code prefixes:
+    A = Admin role (e.g., A12345, AADMIN1)
+    B = Normal user role (e.g., B11111, BUSER1)
+
+Flow:
+1. User enters email + pre-approved code (no send-code step needed)
+2. POST /api/auth/email/verify with email and code
+3. System validates code against AUTH__PREAPPROVED_CODES
+4. Creates user if not exists, sets role based on prefix
+5. Returns JWT tokens (same as email auth)
+
+Example:
+    curl -X POST http://localhost:8000/api/auth/email/verify \
+        -H "Content-Type: application/json" \
+        -d '{"email": "admin@example.com", "code": "A12345"}'
+
 =============================================================================
 Email Authentication Access Control
 =============================================================================
@@ -242,6 +273,12 @@ async def verify_email_code(request: Request, body: EmailVerifyRequest):
     """
     Verify login code and create session with JWT tokens.
 
+    Supports two authentication methods:
+    1. Pre-approved codes: Codes from AUTH__PREAPPROVED_CODES bypass email verification.
+       - A prefix = admin role, B prefix = normal user role
+       - Creates user if not exists, logs in directly
+    2. Email verification: Standard 6-digit code sent via email
+
     Args:
         request: FastAPI request
         body: EmailVerifyRequest with email and code
@@ -249,12 +286,6 @@ async def verify_email_code(request: Request, body: EmailVerifyRequest):
     Returns:
         Success status with user info and JWT tokens
     """
-    if not settings.email.is_configured:
-        raise HTTPException(
-            status_code=501,
-            detail="Email authentication is not configured"
-        )
-
     if not settings.postgres.enabled:
         raise HTTPException(
             status_code=501,
@@ -264,6 +295,79 @@ async def verify_email_code(request: Request, body: EmailVerifyRequest):
     db = PostgresService()
     try:
         await db.connect()
+        user_service = UserService(db)
+
+        # Check for pre-approved code first
+        preapproved = settings.auth.check_preapproved_code(body.code)
+        if preapproved:
+            logger.info(f"Pre-approved code login attempt for {body.email} (role: {preapproved['role']})")
+
+            # Get or create user with pre-approved role
+            user_id = email_to_user_id(body.email)
+            user_entity = await user_service.get_user_by_id(user_id)
+
+            if not user_entity:
+                # Create new user with role from pre-approved code
+                user_entity = await user_service.get_or_create_user(
+                    email=body.email,
+                    name=body.email.split("@")[0],
+                    tenant_id="default",
+                )
+                # Update role based on pre-approved code prefix
+                user_entity.role = preapproved["role"]
+                from ...services.postgres.repository import Repository
+                from ...models.entities.user import User
+                user_repo = Repository(User, "users", db=db)
+                await user_repo.upsert(user_entity)
+                logger.info(f"Created user {body.email} with role={preapproved['role']} via pre-approved code")
+            else:
+                # Update existing user's role if admin code used
+                if preapproved["role"] == "admin" and user_entity.role != "admin":
+                    user_entity.role = "admin"
+                    from ...services.postgres.repository import Repository
+                    from ...models.entities.user import User
+                    user_repo = Repository(User, "users", db=db)
+                    await user_repo.upsert(user_entity)
+                    logger.info(f"Upgraded user {body.email} to admin via pre-approved code")
+
+            # Build user dict for session/JWT
+            user_dict = {
+                "id": str(user_entity.id),
+                "email": body.email,
+                "email_verified": True,
+                "name": user_entity.name or body.email.split("@")[0],
+                "provider": "preapproved",
+                "tenant_id": user_entity.tenant_id or "default",
+                "tier": user_entity.tier.value if user_entity.tier else "free",
+                "role": user_entity.role or preapproved["role"],
+                "roles": [user_entity.role or preapproved["role"]],
+            }
+
+            # Generate JWT tokens
+            jwt_service = get_jwt_service()
+            tokens = jwt_service.create_tokens(user_dict)
+
+            # Store user in session
+            request.session["user"] = user_dict
+
+            logger.info(f"User authenticated via pre-approved code: {body.email} (role: {user_dict['role']})")
+
+            return {
+                "success": True,
+                "message": "Successfully authenticated with pre-approved code!",
+                "user": user_dict,
+                "access_token": tokens["access_token"],
+                "refresh_token": tokens["refresh_token"],
+                "token_type": tokens["token_type"],
+                "expires_in": tokens["expires_in"],
+            }
+
+        # Standard email verification flow
+        if not settings.email.is_configured:
+            raise HTTPException(
+                status_code=501,
+                detail="Email authentication is not configured"
+            )
 
         # Initialize email auth provider
         email_auth = EmailAuthProvider()
@@ -288,7 +392,6 @@ async def verify_email_code(request: Request, body: EmailVerifyRequest):
         )
 
         # Fetch actual user data from database to get role/tier
-        user_service = UserService(db)
         try:
             user_entity = await user_service.get_user_by_id(result.user_id)
             if user_entity: