remdb 0.3.172__py3-none-any.whl → 0.3.223__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of remdb might be problematic.

Files changed (57)
  1. rem/agentic/README.md +262 -2
  2. rem/agentic/context.py +173 -0
  3. rem/agentic/context_builder.py +12 -2
  4. rem/agentic/mcp/tool_wrapper.py +39 -16
  5. rem/agentic/providers/pydantic_ai.py +46 -43
  6. rem/agentic/schema.py +2 -2
  7. rem/agentic/tools/rem_tools.py +11 -0
  8. rem/api/main.py +1 -1
  9. rem/api/mcp_router/resources.py +64 -8
  10. rem/api/mcp_router/server.py +31 -24
  11. rem/api/mcp_router/tools.py +621 -166
  12. rem/api/routers/admin.py +30 -4
  13. rem/api/routers/auth.py +114 -15
  14. rem/api/routers/chat/completions.py +66 -18
  15. rem/api/routers/chat/sse_events.py +7 -3
  16. rem/api/routers/chat/streaming.py +254 -22
  17. rem/api/routers/common.py +18 -0
  18. rem/api/routers/dev.py +7 -1
  19. rem/api/routers/feedback.py +9 -1
  20. rem/api/routers/messages.py +176 -38
  21. rem/api/routers/models.py +9 -1
  22. rem/api/routers/query.py +12 -1
  23. rem/api/routers/shared_sessions.py +16 -0
  24. rem/auth/jwt.py +19 -4
  25. rem/auth/middleware.py +42 -28
  26. rem/cli/README.md +62 -0
  27. rem/cli/commands/ask.py +1 -1
  28. rem/cli/commands/db.py +148 -70
  29. rem/cli/commands/process.py +171 -43
  30. rem/models/entities/ontology.py +91 -101
  31. rem/schemas/agents/rem.yaml +1 -1
  32. rem/services/content/service.py +18 -5
  33. rem/services/email/service.py +11 -2
  34. rem/services/embeddings/worker.py +26 -12
  35. rem/services/postgres/__init__.py +28 -3
  36. rem/services/postgres/diff_service.py +57 -5
  37. rem/services/postgres/programmable_diff_service.py +635 -0
  38. rem/services/postgres/pydantic_to_sqlalchemy.py +2 -2
  39. rem/services/postgres/register_type.py +12 -11
  40. rem/services/postgres/repository.py +46 -25
  41. rem/services/postgres/schema_generator.py +5 -5
  42. rem/services/postgres/sql_builder.py +6 -5
  43. rem/services/session/__init__.py +8 -1
  44. rem/services/session/compression.py +40 -2
  45. rem/services/session/pydantic_messages.py +276 -0
  46. rem/settings.py +28 -0
  47. rem/sql/background_indexes.sql +5 -0
  48. rem/sql/migrations/001_install.sql +157 -10
  49. rem/sql/migrations/002_install_models.sql +160 -132
  50. rem/sql/migrations/004_cache_system.sql +7 -275
  51. rem/sql/migrations/migrate_session_id_to_uuid.sql +45 -0
  52. rem/utils/model_helpers.py +101 -0
  53. rem/utils/schema_loader.py +6 -6
  54. {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/METADATA +1 -1
  55. {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/RECORD +57 -53
  56. {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/WHEEL +0 -0
  57. {remdb-0.3.172.dist-info → remdb-0.3.223.dist-info}/entry_points.txt +0 -0
rem/agentic/README.md CHANGED
@@ -716,11 +716,271 @@ curl -X POST http://localhost:8000/api/v1/chat/completions \
 
  See `rem/api/README.md` for full SSE event protocol documentation.
 
+ ## Multi-Agent Orchestration
+
+ REM supports hierarchical agent orchestration where agents can delegate work to other agents via the `ask_agent` tool. This enables complex workflows with specialized agents.
+
+ ### Architecture
+
+ ```mermaid
+ sequenceDiagram
+     participant User
+     participant API as Chat API
+     participant Orchestrator as Orchestrator Agent
+     participant EventSink as Event Sink (Queue)
+     participant Child as Child Agent
+     participant DB as PostgreSQL
+
+     User->>API: POST /chat/completions (stream=true)
+     API->>API: Create event sink (asyncio.Queue)
+     API->>Orchestrator: agent.iter(prompt)
+
+     loop Streaming Loop
+         Orchestrator->>API: PartDeltaEvent (text)
+         API->>User: SSE: data: {"delta": {"content": "..."}}
+     end
+
+     Orchestrator->>Orchestrator: Decides to call ask_agent
+     Orchestrator->>API: ToolCallPart (ask_agent)
+     API->>User: SSE: event: tool_call
+
+     API->>Child: ask_agent("child_name", input)
+     Child->>EventSink: push_event(child_content)
+     EventSink->>API: Consume child events
+     API->>User: SSE: data: {"delta": {"content": "..."}}
+
+     Child->>Child: Completes
+     Child-->>Orchestrator: Return result
+
+     Orchestrator->>API: Final response
+     API->>DB: Save tool calls
+     API->>DB: Save assistant message
+     API->>User: SSE: data: [DONE]
+ ```
+
+ ### Event Sink Pattern
+
+ When an agent delegates to a child via `ask_agent`, the child's streaming events need to bubble up to the parent's stream. This is achieved through an **event sink** pattern using Python's `ContextVar`:
+
+ ```python
+ # context.py
+ from contextvars import ContextVar
+
+ _parent_event_sink: ContextVar["asyncio.Queue | None"] = ContextVar(
+     "parent_event_sink", default=None
+ )
+
+ async def push_event(event: Any) -> bool:
+     """Push event to parent's event sink if available."""
+     sink = _parent_event_sink.get()
+     if sink is not None:
+         await sink.put(event)
+         return True
+     return False
+ ```
+
+ The streaming controller sets up the event sink before agent execution:
+
+ ```python
+ # streaming.py
+ child_event_sink: asyncio.Queue = asyncio.Queue()
+ set_event_sink(child_event_sink)
+
+ async with agent.iter(prompt) as agent_run:
+     async for node in agent_run:
+         # Process agent events...
+
+         # Consume any child events that arrived
+         while not child_event_sink.empty():
+             child_event = child_event_sink.get_nowait()
+             if child_event["type"] == "child_content":
+                 yield format_sse_content_delta(child_event["content"])
+ ```
+
+ ### ask_agent Tool Implementation
+
+ The `ask_agent` tool in `mcp_router/tools.py` uses Pydantic AI's streaming iteration:
+
+ ```python
+ async def ask_agent(agent_name: str, input_text: str, ...):
+     """Delegate work to another agent."""
+
+     # Load and create child agent
+     schema = await load_agent_schema_async(agent_name, user_id)
+     child_agent = await create_agent(context=context, agent_schema_override=schema)
+
+     # Stream child agent with event proxying
+     async with child_agent.iter(prompt) as agent_run:
+         async for node in agent_run:
+             if Agent.is_model_request_node(node):
+                 async with node.stream(agent_run.ctx) as request_stream:
+                     async for event in request_stream:
+                         if isinstance(event, PartDeltaEvent):
+                             # Push content to parent's event sink
+                             await push_event({
+                                 "type": "child_content",
+                                 "agent_name": agent_name,
+                                 "content": event.delta.content_delta,
+                             })
+
+     return agent_run.result
+ ```
+
+ ### Pydantic AI Features Used
+
+ #### 1. Streaming Iteration (`agent.iter()`)
+
+ Unlike `agent.run()`, which runs to completion before returning, `agent.iter()` provides fine-grained control over the execution flow:
+
+ ```python
+ async with agent.iter(prompt) as agent_run:
+     async for node in agent_run:
+         if Agent.is_model_request_node(node):
+             # Model is generating - stream the response
+             async with node.stream(agent_run.ctx) as stream:
+                 async for event in stream:
+                     if isinstance(event, PartStartEvent):
+                         ...  # Tool call or text part starting
+                     elif isinstance(event, PartDeltaEvent):
+                         ...  # Content chunk
+         elif Agent.is_call_tools_node(node):
+             # Tools are being executed
+             async with node.stream(agent_run.ctx) as stream:
+                 async for event in stream:
+                     if isinstance(event, FunctionToolResultEvent):
+                         ...  # Tool completed
+ ```
+
+ #### 2. Node Types
+
+ - **`ModelRequestNode`**: The model is generating a response (text or tool calls)
+ - **`CallToolsNode`**: Tools are being executed
+ - **`End`**: Agent execution complete
+
+ #### 3. Event and Part Types
+
+ - **`PartStartEvent`**: A new part (text or tool call) is starting
+ - **`PartDeltaEvent`**: Content chunk for streaming text
+ - **`FunctionToolResultEvent`**: Tool execution completed with result
+ - **`ToolCallPart`**: Metadata about a tool call (name, arguments)
+ - **`TextPart`**: Text content
+
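Putting the node and event checks together, a complete (if simplified) consumer looks roughly like the sketch below. This is not code from the package: the model string is a placeholder, and the exact availability of `Agent.is_end_node` and the shape of `agent_run.result` should be checked against the Pydantic AI version in use.

```python
from pydantic_ai import Agent
from pydantic_ai.messages import (
    FunctionToolResultEvent,
    PartDeltaEvent,
    TextPartDelta,
)

agent = Agent("openai:gpt-4o")  # placeholder model


async def run_and_trace(prompt: str) -> None:
    async with agent.iter(prompt) as agent_run:
        async for node in agent_run:
            if Agent.is_model_request_node(node):
                # Model output: print text deltas as they stream
                async with node.stream(agent_run.ctx) as stream:
                    async for event in stream:
                        if isinstance(event, PartDeltaEvent) and isinstance(
                            event.delta, TextPartDelta
                        ):
                            print(event.delta.content_delta, end="")
            elif Agent.is_call_tools_node(node):
                # Tool execution: log each completed tool result
                async with node.stream(agent_run.ctx) as stream:
                    async for event in stream:
                        if isinstance(event, FunctionToolResultEvent):
                            print(f"\n[tool result] {event.result.content}")
            elif Agent.is_end_node(node):
                # Agent finished; the final result lives on agent_run.result
                print(f"\nfinal: {agent_run.result}")
```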
+ ### Message Persistence
+
+ All messages are persisted to PostgreSQL for session continuity:
+
+ ```python
+ # streaming.py - after agent completes
+ async def save_session_messages(...):
+     store = SessionMessageStore(user_id=user_id)
+
+     # Save each tool call as a tool message
+     for tool_call in tool_calls:
+         await store.save_message(
+             session_id=session_id,
+             role="tool",
+             content=tool_call.result,
+             tool_name=tool_call.name,
+             tool_call_id=tool_call.id,
+         )
+
+     # Save the final assistant response
+     await store.save_message(
+         session_id=session_id,
+         role="assistant",
+         content=accumulated_content,
+     )
+ ```
+
+ Messages are stored with:
+ - **Embeddings**: For semantic search across conversation history
+ - **Compression**: Long conversations are summarized to manage the context window
+ - **Session isolation**: Each session maintains its own message history
+
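Reading a session back later uses the same store. This is a minimal sketch built only from the `SessionMessageStore` calls shown in this README; the import path and the exact message dict keys are assumptions:

```python
# Sketch: load persisted history for a session (import path assumed)
from rem.services.session import SessionMessageStore


async def show_history(session_id: str, user_id: str) -> None:
    store = SessionMessageStore(user_id=user_id)
    messages = await store.load_session_messages(session_id)

    for msg in messages:
        if msg["role"] == "tool":
            # Tool results carry the tool name alongside the content
            print(f"[tool:{msg.get('tool_name')}] {msg['content'][:80]}")
        else:
            print(f"[{msg['role']}] {msg['content'][:80]}")
```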
+ ### Testing Multi-Agent Systems
+
+ #### Integration Tests
+
+ Real end-to-end tests (no mocking) live in `tests/integration/test_ask_agent_streaming.py`:
+
+ ```python
+ class TestAskAgentStreaming:
+     async def test_ask_agent_streams_and_saves(self, session_id, user_id):
+         """Test delegation via ask_agent."""
+         # Uses test_orchestrator which always delegates to test_responder
+         agent = await create_agent(context=context, agent_schema_override=schema)
+
+         chunks = []
+         async for chunk in stream_openai_response_with_save(
+             agent=agent,
+             prompt="Hello, please delegate this",
+             ...
+         ):
+             chunks.append(chunk)
+
+         # Verify streaming worked
+         assert len(chunks) > 0
+
+         # Verify persistence
+         messages = await store.load_session_messages(session_id)
+         assert len([m for m in messages if m["role"] == "assistant"]) == 1
+         assert len([m for m in messages if m.get("tool_name") == "ask_agent"]) >= 1
+
+     async def test_multi_turn_saves_all_assistant_messages(self, session_id, user_id):
+         """Test that each turn saves its own assistant message.
+
+         This catches scoping bugs like accumulated_content not being
+         properly scoped per-turn.
+         """
+         turn_prompts = [
+             "Hello, how are you?",
+             "Tell me something interesting",
+             "Thanks for chatting!",
+         ]
+
+         for prompt in turn_prompts:
+             async for chunk in stream_openai_response_with_save(...):
+                 pass
+
+         # Each turn should save an assistant message
+         messages = await store.load_session_messages(session_id)
+         assistant_msgs = [m for m in messages if m["role"] == "assistant"]
+         assert len(assistant_msgs) == 3
+ ```
+
+ #### Test Agent Schemas
+
+ Test agents are defined in `tests/data/schemas/agents/`:
+
+ - **`test_orchestrator.yaml`**: Always delegates via `ask_agent`
+ - **`test_responder.yaml`**: Simple agent that responds directly (a sketch of this schema follows the example below)
+
+ ```yaml
+ # test_orchestrator.yaml
+ type: object
+ description: |
+   You are a TEST ORCHESTRATOR that ALWAYS delegates to another agent.
+   Call ask_agent with agent_name="test_responder" on EVERY turn.
+ json_schema_extra:
+   kind: agent
+   name: test_orchestrator
+   tools:
+     - name: ask_agent
+       mcp_server: rem
+ ```
+
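The responder schema itself is not reproduced in this README. A minimal counterpart consistent with the orchestrator schema above might look like the following; the description text is illustrative and the shipped `test_responder.yaml` may differ:

```yaml
# test_responder.yaml (illustrative sketch, not the shipped schema)
type: object
description: |
  You are a TEST RESPONDER. Answer the user's request directly
  and concisely. Do not delegate to other agents.
json_schema_extra:
  kind: agent
  name: test_responder
```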
+ #### Running Integration Tests
+
+ ```bash
+ # Run individually (recommended due to async isolation)
+ POSTGRES__CONNECTION_STRING="postgresql://rem:rem@localhost:5050/rem" \
+   uv run pytest tests/integration/test_ask_agent_streaming.py::TestAskAgentStreaming::test_multi_turn_saves_all_assistant_messages -v -s
+ ```
+
  ## Future Work
 
  - [ ] Phoenix evaluator integration
  - [ ] Agent schema registry (load schemas by URI)
  - [ ] Schema validation and versioning
- - [ ] Multi-turn conversation management
- - [ ] Agent composition (agents calling agents)
+ - [x] Multi-turn conversation management
+ - [x] Agent composition (agents calling agents)
  - [ ] Alternative provider implementations (if needed)
rem/agentic/context.py CHANGED
@@ -22,14 +22,153 @@ Key Design Pattern:
  - Enables session tracking across API, CLI, and test execution
  - Supports header-based configuration override (model, schema URI)
  - Clean separation: context (who/what) vs agent (how)
+
+ Multi-Agent Context Propagation:
+ - ContextVar (_current_agent_context) threads context through nested agent calls
+ - Parent context is automatically available to child agents via get_current_context()
+ - Use agent_context_scope() context manager for scoped context setting
+ - Child agents inherit user_id, tenant_id, session_id, is_eval from parent
  """
 
+ import asyncio
+ from contextlib import contextmanager
+ from contextvars import ContextVar
+ from typing import Any, Generator
+
  from loguru import logger
  from pydantic import BaseModel, Field
 
  from ..settings import settings
 
 
+ # Thread-local context for current agent execution
+ # This enables context propagation through nested agent calls (multi-agent)
+ _current_agent_context: ContextVar["AgentContext | None"] = ContextVar(
+     "current_agent_context", default=None
+ )
+
+ # Event sink for streaming child agent events to parent
+ # When set, child agents (via ask_agent) should push their events here
+ # for the parent's streaming loop to proxy to the client
+ _parent_event_sink: ContextVar["asyncio.Queue | None"] = ContextVar(
+     "parent_event_sink", default=None
+ )
+
+
+ def get_current_context() -> "AgentContext | None":
+     """
+     Get the current agent context from context var.
+
+     Used by MCP tools (like ask_agent) to inherit context from parent agent.
+     Returns None if no context is set (e.g., direct CLI invocation without context).
+
+     Example:
+         # In an MCP tool
+         parent_context = get_current_context()
+         if parent_context:
+             # Inherit user_id, session_id, etc. from parent
+             child_context = parent_context.child_context(agent_schema_uri="child-agent")
+     """
+     return _current_agent_context.get()
+
+
+ def set_current_context(ctx: "AgentContext | None") -> None:
+     """
+     Set the current agent context.
+
+     Called by streaming layer before agent execution.
+     Should be cleared (set to None) after execution completes.
+     """
+     _current_agent_context.set(ctx)
+
+
+ @contextmanager
+ def agent_context_scope(ctx: "AgentContext") -> Generator["AgentContext", None, None]:
+     """
+     Context manager for scoped context setting.
+
+     Automatically restores previous context when exiting scope.
+     Safe for nested agent calls - each level preserves its parent's context.
+
+     Example:
+         context = AgentContext(user_id="user-123")
+         with agent_context_scope(context):
+             # Context is available via get_current_context()
+             result = await agent.run(...)
+         # Previous context (or None) is restored
+     """
+     previous = _current_agent_context.get()
+     _current_agent_context.set(ctx)
+     try:
+         yield ctx
+     finally:
+         _current_agent_context.set(previous)
+
+
+ # =============================================================================
+ # Event Sink for Streaming Multi-Agent Delegation
+ # =============================================================================
+
+
+ def get_event_sink() -> "asyncio.Queue | None":
+     """
+     Get the parent's event sink for streaming child events.
+
+     Used by ask_agent to push child agent events to the parent's stream.
+     Returns None if not in a streaming context.
+     """
+     return _parent_event_sink.get()
+
+
+ def set_event_sink(sink: "asyncio.Queue | None") -> None:
+     """Set the event sink for child agents to push events to."""
+     _parent_event_sink.set(sink)
+
+
+ @contextmanager
+ def event_sink_scope(sink: "asyncio.Queue") -> Generator["asyncio.Queue", None, None]:
+     """
+     Context manager for scoped event sink setting.
+
+     Used by streaming layer to set up event proxying before tool execution.
+     Child agents (via ask_agent) will push their events to this sink.
+
+     Example:
+         event_queue = asyncio.Queue()
+         with event_sink_scope(event_queue):
+             # ask_agent will push child events to event_queue
+             async for event in tools_stream:
+                 ...
+             # Also consume from event_queue
+     """
+     previous = _parent_event_sink.get()
+     _parent_event_sink.set(sink)
+     try:
+         yield sink
+     finally:
+         _parent_event_sink.set(previous)
+
+
+ async def push_event(event: Any) -> bool:
+     """
+     Push an event to the parent's event sink (if available).
+
+     Used by ask_agent to proxy child agent events to the parent's stream.
+     Returns True if event was pushed, False if no sink available.
+
+     Args:
+         event: Any streaming event (ToolCallEvent, content chunk, etc.)
+
+     Returns:
+         True if event was pushed to sink, False otherwise
+     """
+     sink = _parent_event_sink.get()
+     if sink is not None:
+         await sink.put(event)
+         return True
+     return False
+
+
  class AgentContext(BaseModel):
      """
      Session and configuration context for agent execution.
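Taken together, these helpers let a delegating tool inherit the caller's context and forward child events upstream. A condensed sketch of that flow (illustrative only; `create_agent` is an assumed factory, the prompt handling is simplified, and the event payload shape is an assumption):

```python
# Sketch: a delegating tool combining the context and event-sink helpers
from rem.agentic.context import (
    agent_context_scope,
    get_current_context,
    push_event,
)


async def delegate(agent_name: str, prompt: str) -> str:
    parent = get_current_context()
    if parent is None:
        raise RuntimeError("delegation requires a parent AgentContext")

    # Child inherits user/tenant/session; only the schema changes
    child_ctx = parent.child_context(agent_schema_uri=agent_name)

    with agent_context_scope(child_ctx):
        child_agent = await create_agent(context=child_ctx)  # assumed factory
        async with child_agent.iter(prompt) as agent_run:
            async for node in agent_run:
                # Notify the parent stream per node; a real tool would
                # filter for content deltas and include the text
                await push_event({"type": "child_node", "agent_name": agent_name})
        return str(agent_run.result)
```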
@@ -85,6 +224,40 @@ class AgentContext(BaseModel):
 
      model_config = {"populate_by_name": True}
 
+     def child_context(
+         self,
+         agent_schema_uri: str | None = None,
+         model_override: str | None = None,
+     ) -> "AgentContext":
+         """
+         Create a child context for nested agent calls.
+
+         Inherits user_id, tenant_id, session_id, is_eval from parent.
+         Allows overriding agent_schema_uri and default_model for the child.
+
+         Args:
+             agent_schema_uri: Agent schema for the child agent (required for lineage)
+             model_override: Optional model override for child agent
+
+         Returns:
+             New AgentContext for the child agent
+
+         Example:
+             parent_context = get_current_context()
+             child_context = parent_context.child_context(
+                 agent_schema_uri="sentiment-analyzer"
+             )
+             agent = await create_agent(context=child_context)
+         """
+         return AgentContext(
+             user_id=self.user_id,
+             tenant_id=self.tenant_id,
+             session_id=self.session_id,
+             default_model=model_override or self.default_model,
+             agent_schema_uri=agent_schema_uri or self.agent_schema_uri,
+             is_eval=self.is_eval,
+         )
+
      @staticmethod
      def get_user_id_or_default(
          user_id: str | None,
rem/agentic/context_builder.py CHANGED
@@ -217,11 +217,21 @@ class ContextBuilder:
          )
 
          # Convert to ContextMessage format
+         # For tool messages, wrap content with clear markers so the agent
+         # can see previous tool results when the prompt is concatenated
          for msg_dict in session_history:
+             role = msg_dict["role"]
+             content = msg_dict["content"]
+
+             if role == "tool":
+                 # Wrap tool results with clear markers for visibility
+                 tool_name = msg_dict.get("tool_name", "unknown")
+                 content = f"[TOOL RESULT: {tool_name}]\n{content}\n[/TOOL RESULT]"
+
              messages.append(
                  ContextMessage(
-                     role=msg_dict["role"],
-                     content=msg_dict["content"],
+                     role=role,
+                     content=content,
                  )
              )
 
rem/agentic/mcp/tool_wrapper.py CHANGED
@@ -116,7 +116,7 @@ def create_resource_tool(uri: str, usage: str = "", mcp_server: Any = None) -> T
      the artificial MCP distinction between tools and resources.
 
      Supports both:
-     - Concrete URIs: "rem://schemas" -> tool with no parameters
+     - Concrete URIs: "rem://agents" -> tool with no parameters
      - Template URIs: "patient-profile://field/{field_key}" -> tool with field_key parameter
 
      Args:
@@ -131,7 +131,7 @@ def create_resource_tool(uri: str, usage: str = "", mcp_server: Any = None) -> T
 
      Example:
          # Concrete URI -> no-param tool
-         tool = create_resource_tool("rem://schemas", "List all agent schemas")
+         tool = create_resource_tool("rem://agents", "List all agent schemas")
 
          # Template URI -> parameterized tool
          tool = create_resource_tool("patient-profile://field/{field_key}", "Get field definition", mcp_server=mcp)
@@ -161,6 +161,11 @@ def create_resource_tool(uri: str, usage: str = "", mcp_server: Any = None) -> T
          param_desc = ", ".join(template_vars)
          description = f"{description}\n\nParameters: {param_desc}"
 
+     # Capture mcp_server reference at tool creation time (for closure)
+     # This ensures the correct server is used even if called later
+     _captured_mcp_server = mcp_server
+     _captured_uri = uri  # Also capture URI for consistent logging
+
      if template_vars:
          # Template URI -> create parameterized tool
          async def wrapper(**kwargs: Any) -> str:
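The `_captured_*` assignments above are a defensive closure pattern: the nested `wrapper` functions read whatever the outer names are bound to when they eventually run, so pinning the values into names that are never rebound guarantees each generated tool keeps the `uri` and `mcp_server` it was created with. A generic, contrived illustration of the pitfall (not code from this package):

```python
# Closures capture variables, not values: `wrapper` sees the binding of
# `uri` at call time, while `_captured_uri` is never rebound and therefore
# always holds the value from creation time.
def make_tool(uri: str):
    _captured_uri = uri  # pinned copy, mirroring _captured_uri in the diff

    async def wrapper() -> str:
        # Returning `uri` here would reflect the later rebinding below;
        # returning `_captured_uri` keeps the original value.
        return _captured_uri

    uri = uri.upper()  # contrived later rebinding of the outer name
    return wrapper
```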
@@ -168,13 +173,17 @@ def create_resource_tool(uri: str, usage: str = "", mcp_server: Any = None) -> T
              import asyncio
              import inspect
 
+             logger.debug(f"Resource tool invoked: uri={_captured_uri}, kwargs={kwargs}, mcp_server={'set' if _captured_mcp_server else 'None'}")
+
              # Try to resolve from MCP server's resource templates first
-             if mcp_server is not None:
+             if _captured_mcp_server is not None:
                  try:
                      # Get resource templates from MCP server
-                     templates = await mcp_server.get_resource_templates()
-                     if uri in templates:
-                         template = templates[uri]
+                     templates = await _captured_mcp_server.get_resource_templates()
+                     logger.debug(f"MCP server templates: {list(templates.keys())}")
+                     if _captured_uri in templates:
+                         template = templates[_captured_uri]
+                         logger.debug(f"Found template for {_captured_uri}, calling fn with kwargs={kwargs}")
                          # Call the template's underlying function directly
                          # The fn expects the template variables as kwargs
                          fn_result = template.fn(**kwargs)
@@ -184,17 +193,22 @@ def create_resource_tool(uri: str, usage: str = "", mcp_server: Any = None) -> T
                          if isinstance(fn_result, str):
                              return fn_result
                          return json.dumps(fn_result, indent=2)
+                     else:
+                         logger.warning(f"Template {_captured_uri} not found in MCP server templates: {list(templates.keys())}")
                  except Exception as e:
-                     logger.warning(f"Failed to resolve resource {uri} from MCP server: {e}")
+                     logger.warning(f"Failed to resolve resource {_captured_uri} from MCP server: {e}", exc_info=True)
+             else:
+                 logger.warning(f"No MCP server provided for resource tool {_captured_uri}, using fallback")
 
              # Fallback: substitute template variables and use load_resource
-             resolved_uri = uri
+             resolved_uri = _captured_uri
              for var in template_vars:
                  if var in kwargs:
                      resolved_uri = resolved_uri.replace(f"{{{var}}}", str(kwargs[var]))
                  else:
                      return json.dumps({"error": f"Missing required parameter: {var}"})
 
+             logger.debug(f"Using fallback load_resource for resolved URI: {resolved_uri}")
              from rem.api.mcp_router.resources import load_resource
              result = await load_resource(resolved_uri)
              if isinstance(result, str):
@@ -208,7 +222,7 @@ def create_resource_tool(uri: str, usage: str = "", mcp_server: Any = None) -> T
          wrapper.__annotations__ = {var: str for var in template_vars}
          wrapper.__annotations__['return'] = str
 
-         logger.info(f"Built parameterized resource tool: {func_name} (uri: {uri}, params: {template_vars})")
+         logger.info(f"Built parameterized resource tool: {func_name} (uri: {uri}, params: {template_vars}, mcp_server={'provided' if mcp_server else 'None'})")
      else:
          # Concrete URI -> no-param tool
          async def wrapper(**kwargs: Any) -> str:
@@ -219,12 +233,16 @@ def create_resource_tool(uri: str, usage: str = "", mcp_server: Any = None) -> T
              if kwargs:
                  logger.warning(f"Resource tool {func_name} called with unexpected kwargs: {list(kwargs.keys())}")
 
+             logger.debug(f"Concrete resource tool invoked: uri={_captured_uri}, mcp_server={'set' if _captured_mcp_server else 'None'}")
+
              # Try to resolve from MCP server's resources first
-             if mcp_server is not None:
+             if _captured_mcp_server is not None:
                  try:
-                     resources = await mcp_server.get_resources()
-                     if uri in resources:
-                         resource = resources[uri]
+                     resources = await _captured_mcp_server.get_resources()
+                     logger.debug(f"MCP server resources: {list(resources.keys())}")
+                     if _captured_uri in resources:
+                         resource = resources[_captured_uri]
+                         logger.debug(f"Found resource for {_captured_uri}")
                          # Call the resource's underlying function
                          fn_result = resource.fn()
                          if inspect.iscoroutine(fn_result):
@@ -232,12 +250,17 @@ def create_resource_tool(uri: str, usage: str = "", mcp_server: Any = None) -> T
                          if isinstance(fn_result, str):
                              return fn_result
                          return json.dumps(fn_result, indent=2)
+                     else:
+                         logger.warning(f"Resource {_captured_uri} not found in MCP server resources: {list(resources.keys())}")
                  except Exception as e:
-                     logger.warning(f"Failed to resolve resource {uri} from MCP server: {e}")
+                     logger.warning(f"Failed to resolve resource {_captured_uri} from MCP server: {e}", exc_info=True)
+             else:
+                 logger.warning(f"No MCP server provided for resource tool {_captured_uri}, using fallback")
 
              # Fallback to load_resource
+             logger.debug(f"Using fallback load_resource for URI: {_captured_uri}")
              from rem.api.mcp_router.resources import load_resource
-             result = await load_resource(uri)
+             result = await load_resource(_captured_uri)
              if isinstance(result, str):
                  return result
              return json.dumps(result, indent=2)
@@ -245,6 +268,6 @@ def create_resource_tool(uri: str, usage: str = "", mcp_server: Any = None) -> T
      wrapper.__name__ = func_name
      wrapper.__doc__ = description
 
-     logger.info(f"Built resource tool: {func_name} (uri: {uri})")
+     logger.info(f"Built resource tool: {func_name} (uri: {uri}, mcp_server={'provided' if mcp_server else 'None'})")
 
      return Tool(wrapper)